2024-11-20 17:24:10,573 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 17:24:10,586 main DEBUG Took 0.010838 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-20 17:24:10,586 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-20 17:24:10,587 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-20 17:24:10,588 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-20 17:24:10,589 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:24:10,596 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-20 17:24:10,608 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,609 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:24:10,610 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,610 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:24:10,611 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,611 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:24:10,612 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,612 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:24:10,613 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,613 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:24:10,613 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,614 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:24:10,614 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,615 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-20 17:24:10,615 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,615 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:24:10,616 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,616 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:24:10,616 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,617 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:24:10,617 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,617 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:24:10,618 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,618 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 17:24:10,618 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,619 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-20 17:24:10,620 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 17:24:10,621 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-20 17:24:10,623 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-20 17:24:10,624 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-20 17:24:10,625 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-20 17:24:10,625 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-20 17:24:10,634 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-20 17:24:10,636 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-20 17:24:10,638 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-20 17:24:10,638 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-20 17:24:10,639 main DEBUG createAppenders(={Console}) 2024-11-20 17:24:10,639 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-20 17:24:10,640 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 17:24:10,640 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-20 17:24:10,641 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-20 17:24:10,641 main DEBUG OutputStream closed 2024-11-20 17:24:10,641 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-20 17:24:10,641 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-20 17:24:10,642 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-20 17:24:10,718 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-20 17:24:10,721 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-20 17:24:10,722 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-20 17:24:10,723 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-20 17:24:10,724 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-20 17:24:10,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-20 17:24:10,725 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-20 17:24:10,726 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-20 17:24:10,726 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-20 17:24:10,727 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-20 17:24:10,727 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-20 17:24:10,727 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-20 17:24:10,728 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-20 17:24:10,728 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-20 17:24:10,729 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-20 17:24:10,729 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-20 17:24:10,729 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-20 17:24:10,730 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-20 17:24:10,733 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-20 17:24:10,733 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-20 17:24:10,733 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-20 17:24:10,734 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-20T17:24:10,957 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978 2024-11-20 17:24:10,960 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-20 17:24:10,960 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-20T17:24:10,969 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-11-20T17:24:10,988 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T17:24:10,991 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/cluster_57d363a1-1306-466a-894b-887e1f24f2a3, deleteOnExit=true 2024-11-20T17:24:10,991 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-20T17:24:10,992 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/test.cache.data in system properties and HBase conf 2024-11-20T17:24:10,992 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T17:24:10,993 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/hadoop.log.dir in system properties and HBase conf 2024-11-20T17:24:10,994 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T17:24:10,994 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T17:24:10,995 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-20T17:24:11,094 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-20T17:24:11,190 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T17:24:11,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T17:24:11,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T17:24:11,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T17:24:11,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T17:24:11,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T17:24:11,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T17:24:11,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T17:24:11,197 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T17:24:11,197 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T17:24:11,198 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/nfs.dump.dir in system properties and HBase conf 2024-11-20T17:24:11,198 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/java.io.tmpdir in system properties and HBase conf 2024-11-20T17:24:11,198 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T17:24:11,199 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T17:24:11,199 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T17:24:12,054 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-20T17:24:12,142 INFO [Time-limited test {}] log.Log(170): Logging initialized @2269ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-20T17:24:12,218 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T17:24:12,281 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T17:24:12,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T17:24:12,301 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T17:24:12,302 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T17:24:12,314 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T17:24:12,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73882ca4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/hadoop.log.dir/,AVAILABLE} 2024-11-20T17:24:12,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@588be694{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T17:24:12,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f0d4558{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/java.io.tmpdir/jetty-localhost-34477-hadoop-hdfs-3_4_1-tests_jar-_-any-8221950288888950973/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T17:24:12,527 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a299586{HTTP/1.1, (http/1.1)}{localhost:34477} 2024-11-20T17:24:12,527 INFO [Time-limited test {}] server.Server(415): Started @2656ms 2024-11-20T17:24:12,917 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T17:24:12,925 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T17:24:12,926 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T17:24:12,926 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T17:24:12,926 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-20T17:24:12,927 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57582772{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/hadoop.log.dir/,AVAILABLE} 2024-11-20T17:24:12,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63d4d645{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T17:24:13,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bd2e890{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/java.io.tmpdir/jetty-localhost-32979-hadoop-hdfs-3_4_1-tests_jar-_-any-17376557551877897998/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T17:24:13,053 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d3fa6ef{HTTP/1.1, (http/1.1)}{localhost:32979} 2024-11-20T17:24:13,053 INFO [Time-limited test {}] server.Server(415): Started @3181ms 2024-11-20T17:24:13,110 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T17:24:13,565 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/cluster_57d363a1-1306-466a-894b-887e1f24f2a3/dfs/data/data2/current/BP-1213880657-172.17.0.2-1732123451817/current, will proceed with Du for space computation calculation, 2024-11-20T17:24:13,565 WARN [Thread-71 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/cluster_57d363a1-1306-466a-894b-887e1f24f2a3/dfs/data/data1/current/BP-1213880657-172.17.0.2-1732123451817/current, will proceed with Du for space computation calculation, 2024-11-20T17:24:13,609 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T17:24:13,675 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd75d37b179a8ffc8 with lease ID 0x995d3d4a54a1906a: Processing first storage report for DS-1fab594c-a880-4000-8925-7b1579b989ec from datanode DatanodeRegistration(127.0.0.1:36709, datanodeUuid=f4cd3955-a200-4582-9dfd-eca8d5ad5f92, infoPort=45017, infoSecurePort=0, ipcPort=45699, storageInfo=lv=-57;cid=testClusterID;nsid=1655386123;c=1732123451817) 2024-11-20T17:24:13,676 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd75d37b179a8ffc8 with lease ID 0x995d3d4a54a1906a: from storage DS-1fab594c-a880-4000-8925-7b1579b989ec node DatanodeRegistration(127.0.0.1:36709, datanodeUuid=f4cd3955-a200-4582-9dfd-eca8d5ad5f92, infoPort=45017, infoSecurePort=0, ipcPort=45699, storageInfo=lv=-57;cid=testClusterID;nsid=1655386123;c=1732123451817), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-20T17:24:13,676 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd75d37b179a8ffc8 with lease ID 0x995d3d4a54a1906a: Processing first storage report for DS-0951150b-db5e-4261-aa0b-bb7e1308c20c from datanode DatanodeRegistration(127.0.0.1:36709, datanodeUuid=f4cd3955-a200-4582-9dfd-eca8d5ad5f92, infoPort=45017, infoSecurePort=0, ipcPort=45699, storageInfo=lv=-57;cid=testClusterID;nsid=1655386123;c=1732123451817) 2024-11-20T17:24:13,676 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd75d37b179a8ffc8 with lease ID 0x995d3d4a54a1906a: from storage DS-0951150b-db5e-4261-aa0b-bb7e1308c20c node DatanodeRegistration(127.0.0.1:36709, datanodeUuid=f4cd3955-a200-4582-9dfd-eca8d5ad5f92, infoPort=45017, infoSecurePort=0, ipcPort=45699, storageInfo=lv=-57;cid=testClusterID;nsid=1655386123;c=1732123451817), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T17:24:13,702 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978 2024-11-20T17:24:13,780 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/cluster_57d363a1-1306-466a-894b-887e1f24f2a3/zookeeper_0, clientPort=56028, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/cluster_57d363a1-1306-466a-894b-887e1f24f2a3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/cluster_57d363a1-1306-466a-894b-887e1f24f2a3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T17:24:13,790 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=56028 2024-11-20T17:24:13,804 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:24:13,809 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:24:14,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741825_1001 (size=7) 2024-11-20T17:24:14,443 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff with version=8 2024-11-20T17:24:14,443 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/hbase-staging 2024-11-20T17:24:14,575 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-20T17:24:14,847 INFO [Time-limited test {}] client.ConnectionUtils(129): master/d514dc944523:0 server-side Connection retries=45 2024-11-20T17:24:14,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T17:24:14,867 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T17:24:14,867 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T17:24:14,867 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T17:24:14,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=1 2024-11-20T17:24:15,002 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T17:24:15,062 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-20T17:24:15,070 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-20T17:24:15,074 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T17:24:15,102 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 24370 (auto-detected) 2024-11-20T17:24:15,103 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-20T17:24:15,122 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35243 2024-11-20T17:24:15,129 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:24:15,132 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:24:15,144 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35243 connecting to ZooKeeper ensemble=127.0.0.1:56028 2024-11-20T17:24:15,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:352430x0, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T17:24:15,179 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35243-0x10015f622270000 connected 2024-11-20T17:24:15,208 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T17:24:15,211 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T17:24:15,214 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T17:24:15,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35243 2024-11-20T17:24:15,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35243 2024-11-20T17:24:15,221 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35243 2024-11-20T17:24:15,221 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35243 2024-11-20T17:24:15,224 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35243 2024-11-20T17:24:15,231 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff, hbase.cluster.distributed=false 2024-11-20T17:24:15,294 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/d514dc944523:0 server-side Connection retries=45 2024-11-20T17:24:15,294 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T17:24:15,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T17:24:15,295 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T17:24:15,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T17:24:15,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T17:24:15,297 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T17:24:15,299 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T17:24:15,300 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44015 2024-11-20T17:24:15,302 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T17:24:15,307 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T17:24:15,309 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:24:15,312 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:24:15,315 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44015 connecting to ZooKeeper ensemble=127.0.0.1:56028 2024-11-20T17:24:15,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440150x0, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T17:24:15,319 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44015-0x10015f622270001 connected 2024-11-20T17:24:15,320 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T17:24:15,322 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T17:24:15,323 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T17:24:15,323 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44015 2024-11-20T17:24:15,324 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44015 2024-11-20T17:24:15,324 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44015 2024-11-20T17:24:15,325 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44015 2024-11-20T17:24:15,325 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44015 2024-11-20T17:24:15,327 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/d514dc944523,35243,1732123454567 2024-11-20T17:24:15,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T17:24:15,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T17:24:15,336 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d514dc944523,35243,1732123454567 2024-11-20T17:24:15,343 DEBUG [M:0;d514dc944523:35243 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d514dc944523:35243 2024-11-20T17:24:15,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T17:24:15,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T17:24:15,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:15,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:15,357 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on 
existing znode=/hbase/master 2024-11-20T17:24:15,358 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d514dc944523,35243,1732123454567 from backup master directory 2024-11-20T17:24:15,359 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T17:24:15,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d514dc944523,35243,1732123454567 2024-11-20T17:24:15,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T17:24:15,361 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T17:24:15,362 WARN [master/d514dc944523:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T17:24:15,362 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d514dc944523,35243,1732123454567 2024-11-20T17:24:15,364 INFO [master/d514dc944523:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-20T17:24:15,366 INFO [master/d514dc944523:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-20T17:24:15,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741826_1002 (size=42) 2024-11-20T17:24:15,835 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/hbase.id with ID: 18daaf8a-b545-42a8-ac13-acedb3503b25 2024-11-20T17:24:15,875 INFO [master/d514dc944523:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T17:24:15,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:15,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:15,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741827_1003 (size=196) 2024-11-20T17:24:16,334 INFO [master/d514dc944523:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:24:16,337 INFO [master/d514dc944523:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T17:24:16,355 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] 
at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:16,359 INFO [master/d514dc944523:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T17:24:16,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741828_1004 (size=1189) 2024-11-20T17:24:16,414 INFO [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store 2024-11-20T17:24:16,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741829_1005 (size=34) 2024-11-20T17:24:16,836 INFO [master/d514dc944523:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-20T17:24:16,836 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:24:16,837 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T17:24:16,838 INFO [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T17:24:16,838 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T17:24:16,838 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T17:24:16,838 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T17:24:16,838 INFO [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T17:24:16,838 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T17:24:16,840 WARN [master/d514dc944523:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/.initializing 2024-11-20T17:24:16,841 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/WALs/d514dc944523,35243,1732123454567 2024-11-20T17:24:16,847 INFO [master/d514dc944523:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T17:24:16,857 INFO [master/d514dc944523:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d514dc944523%2C35243%2C1732123454567, suffix=, logDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/WALs/d514dc944523,35243,1732123454567, archiveDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/oldWALs, maxLogs=10 2024-11-20T17:24:16,878 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/WALs/d514dc944523,35243,1732123454567/d514dc944523%2C35243%2C1732123454567.1732123456861, exclude list is [], retry=0 2024-11-20T17:24:16,894 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36709,DS-1fab594c-a880-4000-8925-7b1579b989ec,DISK] 2024-11-20T17:24:16,897 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-20T17:24:16,934 INFO [master/d514dc944523:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/WALs/d514dc944523,35243,1732123454567/d514dc944523%2C35243%2C1732123454567.1732123456861 2024-11-20T17:24:16,935 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45017:45017)] 2024-11-20T17:24:16,935 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:24:16,936 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:24:16,939 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:24:16,940 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:24:16,978 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:24:17,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T17:24:17,006 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:17,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:24:17,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:24:17,012 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T17:24:17,012 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:17,013 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:17,014 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:24:17,016 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T17:24:17,017 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:17,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:17,018 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:24:17,020 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T17:24:17,020 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:17,021 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:17,024 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:24:17,026 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:24:17,034 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T17:24:17,037 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T17:24:17,041 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:24:17,042 INFO [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67599877, jitterRate=0.007316663861274719}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T17:24:17,046 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T17:24:17,047 INFO [master/d514dc944523:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T17:24:17,075 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4970aa25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:17,109 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
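
The FlushLargeStoresPolicy(65) line above falls back to dividing the region's memstore flush size by its number of column families when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset. A small worked check of that arithmetic against the values in this log (flushSize=134217728 from MasterRegionFlusherAndCompactor, and the four master:store families info, proc, rs, state):

    public class FlushLowerBoundCheck {
      public static void main(String[] args) {
        long memstoreFlushSize = 134_217_728L; // 128 MB, from the flushSize logged above
        int columnFamilies = 4;                // info, proc, rs, state
        long lowerBound = memstoreFlushSize / columnFamilies;
        System.out.println(lowerBound);        // 33554432 = 32 MB, matching flushSizeLowerBound above
      }
    }
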
2024-11-20T17:24:17,120 INFO [master/d514dc944523:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T17:24:17,120 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T17:24:17,122 INFO [master/d514dc944523:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T17:24:17,124 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-20T17:24:17,129 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-11-20T17:24:17,129 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T17:24:17,155 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T17:24:17,170 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T17:24:17,173 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-20T17:24:17,176 INFO [master/d514dc944523:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T17:24:17,178 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T17:24:17,180 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-20T17:24:17,183 INFO [master/d514dc944523:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T17:24:17,187 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T17:24:17,189 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-20T17:24:17,190 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T17:24:17,192 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T17:24:17,204 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T17:24:17,206 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T17:24:17,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T17:24:17,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T17:24:17,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:17,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:17,211 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=d514dc944523,35243,1732123454567, sessionid=0x10015f622270000, setting cluster-up flag (Was=false) 2024-11-20T17:24:17,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:17,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:17,231 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T17:24:17,233 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d514dc944523,35243,1732123454567 2024-11-20T17:24:17,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:17,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:17,243 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T17:24:17,244 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d514dc944523,35243,1732123454567 2024-11-20T17:24:17,323 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-11-20T17:24:17,329 INFO [master/d514dc944523:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-20T17:24:17,331 INFO [master/d514dc944523:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T17:24:17,337 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d514dc944523,35243,1732123454567 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T17:24:17,340 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d514dc944523:0, corePoolSize=5, maxPoolSize=5 2024-11-20T17:24:17,340 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d514dc944523:0, corePoolSize=5, maxPoolSize=5 2024-11-20T17:24:17,341 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d514dc944523:0, corePoolSize=5, maxPoolSize=5 2024-11-20T17:24:17,341 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d514dc944523:0, corePoolSize=5, maxPoolSize=5 2024-11-20T17:24:17,341 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d514dc944523:0, corePoolSize=10, maxPoolSize=10 2024-11-20T17:24:17,341 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:24:17,341 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d514dc944523:0, corePoolSize=2, maxPoolSize=2 2024-11-20T17:24:17,341 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:24:17,341 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d514dc944523:44015 2024-11-20T17:24:17,342 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732123487342 2024-11-20T17:24:17,343 INFO [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1008): ClusterId : 18daaf8a-b545-42a8-ac13-acedb3503b25 2024-11-20T17:24:17,344 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.DirScanPool(74): 
log_cleaner Cleaner pool size is 1 2024-11-20T17:24:17,345 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T17:24:17,346 DEBUG [RS:0;d514dc944523:44015 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T17:24:17,347 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T17:24:17,347 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-20T17:24:17,349 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T17:24:17,349 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T17:24:17,350 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T17:24:17,350 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T17:24:17,351 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:17,351 DEBUG [RS:0;d514dc944523:44015 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T17:24:17,351 DEBUG [RS:0;d514dc944523:44015 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T17:24:17,352 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T17:24:17,352 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:17,352 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T17:24:17,353 INFO 
[master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T17:24:17,353 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T17:24:17,354 DEBUG [RS:0;d514dc944523:44015 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T17:24:17,355 DEBUG [RS:0;d514dc944523:44015 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9b0d9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:17,356 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T17:24:17,356 INFO [master/d514dc944523:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T17:24:17,356 DEBUG [RS:0;d514dc944523:44015 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f24e3ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d514dc944523/172.17.0.2:0 2024-11-20T17:24:17,358 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d514dc944523:0:becomeActiveMaster-HFileCleaner.large.0-1732123457358,5,FailOnTimeoutGroup] 2024-11-20T17:24:17,358 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d514dc944523:0:becomeActiveMaster-HFileCleaner.small.0-1732123457358,5,FailOnTimeoutGroup] 2024-11-20T17:24:17,359 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:17,359 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T17:24:17,360 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:17,361 INFO [RS:0;d514dc944523:44015 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-20T17:24:17,361 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:17,361 INFO [RS:0;d514dc944523:44015 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-20T17:24:17,361 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1090): About to register with Master. 
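
The HMaster(1680) line above notes that reopening regions with a very high storeFileRefCount stays disabled unless a positive threshold is supplied. A hedged sketch of setting that threshold; the property name is taken verbatim from the log line, while the value 3 is purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountSketch {
      public static Configuration withRecoveryThreshold() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3); // any value > 0 enables the chore
        return conf;
      }
    }
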
2024-11-20T17:24:17,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741831_1007 (size=1039) 2024-11-20T17:24:17,363 INFO [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(3073): reportForDuty to master=d514dc944523,35243,1732123454567 with isa=d514dc944523/172.17.0.2:44015, startcode=1732123455293 2024-11-20T17:24:17,374 DEBUG [RS:0;d514dc944523:44015 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T17:24:17,406 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57669, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T17:24:17,412 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35243 {}] master.ServerManager(332): Checking decommissioned status of RegionServer d514dc944523,44015,1732123455293 2024-11-20T17:24:17,414 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35243 {}] master.ServerManager(486): Registering regionserver=d514dc944523,44015,1732123455293 2024-11-20T17:24:17,429 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff 2024-11-20T17:24:17,429 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:40219 2024-11-20T17:24:17,429 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-20T17:24:17,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T17:24:17,434 DEBUG [RS:0;d514dc944523:44015 {}] zookeeper.ZKUtil(111): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d514dc944523,44015,1732123455293 2024-11-20T17:24:17,434 WARN [RS:0;d514dc944523:44015 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
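
The ZKWatcher and ZKUtil lines above show the master and region server reacting to NodeCreated and NodeChildrenChanged events under /hbase and re-arming watches on znodes such as /hbase/rs/... . A minimal, hedged sketch of the same pattern against the quorum address from this log, using the plain ZooKeeper client rather than HBase's internal ZKWatcher; the session timeout is illustrative.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkEventSketch {
      public static void main(String[] args) throws Exception {
        // Quorum and znode paths taken from the log lines above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56028", 30_000, (WatchedEvent event) ->
            System.out.println("Received ZooKeeper Event, type=" + event.getType()
                + ", state=" + event.getState() + ", path=" + event.getPath()));
        zk.exists("/hbase/running", true); // re-arm a watch, as ZKUtil's set-watcher calls do
      }
    }
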
2024-11-20T17:24:17,435 INFO [RS:0;d514dc944523:44015 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T17:24:17,435 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/WALs/d514dc944523,44015,1732123455293 2024-11-20T17:24:17,437 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d514dc944523,44015,1732123455293] 2024-11-20T17:24:17,448 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-20T17:24:17,459 INFO [RS:0;d514dc944523:44015 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T17:24:17,472 INFO [RS:0;d514dc944523:44015 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T17:24:17,475 INFO [RS:0;d514dc944523:44015 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T17:24:17,475 INFO [RS:0;d514dc944523:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:17,476 INFO [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-20T17:24:17,482 INFO [RS:0;d514dc944523:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
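
The WALFactory(183) and PressureAwareCompactionThroughputController(131) lines above reflect the WAL provider choice and the compaction throughput bounds (100 MB/s upper, 50 MB/s lower). A hedged sketch of the configuration keys usually behind them; the key names are the commonly documented ones and should be checked against the running HBase version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalAndThroughputSketch {
      public static Configuration sketch() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "asyncfs"); // AsyncFSWALProvider, as logged by WALFactory(183)
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
        return conf;
      }
    }
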
2024-11-20T17:24:17,483 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:24:17,483 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:24:17,483 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:24:17,483 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:24:17,483 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:24:17,483 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d514dc944523:0, corePoolSize=2, maxPoolSize=2 2024-11-20T17:24:17,484 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:24:17,484 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:24:17,484 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:24:17,484 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:24:17,484 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d514dc944523:0, corePoolSize=1, maxPoolSize=1 2024-11-20T17:24:17,484 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d514dc944523:0, corePoolSize=3, maxPoolSize=3 2024-11-20T17:24:17,484 DEBUG [RS:0;d514dc944523:44015 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0, corePoolSize=3, maxPoolSize=3 2024-11-20T17:24:17,485 INFO [RS:0;d514dc944523:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:17,485 INFO [RS:0;d514dc944523:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:17,486 INFO [RS:0;d514dc944523:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:17,486 INFO [RS:0;d514dc944523:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:17,486 INFO [RS:0;d514dc944523:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,44015,1732123455293-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
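
Each ExecutorService(95) line above reports a named pool with a corePoolSize and maxPoolSize. As a rough, hedged analogy only (not HBase's actual executor implementation), the RS_OPEN_REGION sizing expressed with a plain ThreadPoolExecutor:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PoolSizingSketch {
      // Mirrors the RS_OPEN_REGION pool above: corePoolSize=1, maxPoolSize=1.
      // Queue type and keep-alive are illustrative, not HBase internals.
      static ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
          1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    }
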
2024-11-20T17:24:17,506 INFO [RS:0;d514dc944523:44015 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T17:24:17,508 INFO [RS:0;d514dc944523:44015 {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,44015,1732123455293-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:17,528 INFO [RS:0;d514dc944523:44015 {}] regionserver.Replication(204): d514dc944523,44015,1732123455293 started 2024-11-20T17:24:17,528 INFO [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1767): Serving as d514dc944523,44015,1732123455293, RpcServer on d514dc944523/172.17.0.2:44015, sessionid=0x10015f622270001 2024-11-20T17:24:17,529 DEBUG [RS:0;d514dc944523:44015 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T17:24:17,529 DEBUG [RS:0;d514dc944523:44015 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d514dc944523,44015,1732123455293 2024-11-20T17:24:17,529 DEBUG [RS:0;d514dc944523:44015 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd514dc944523,44015,1732123455293' 2024-11-20T17:24:17,529 DEBUG [RS:0;d514dc944523:44015 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T17:24:17,530 DEBUG [RS:0;d514dc944523:44015 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T17:24:17,530 DEBUG [RS:0;d514dc944523:44015 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T17:24:17,531 DEBUG [RS:0;d514dc944523:44015 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T17:24:17,531 DEBUG [RS:0;d514dc944523:44015 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d514dc944523,44015,1732123455293 2024-11-20T17:24:17,531 DEBUG [RS:0;d514dc944523:44015 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd514dc944523,44015,1732123455293' 2024-11-20T17:24:17,531 DEBUG [RS:0;d514dc944523:44015 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T17:24:17,531 DEBUG [RS:0;d514dc944523:44015 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T17:24:17,532 DEBUG [RS:0;d514dc944523:44015 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T17:24:17,532 INFO [RS:0;d514dc944523:44015 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T17:24:17,532 INFO [RS:0;d514dc944523:44015 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
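
The two quota manager lines above show quota support switched off for this run. A hedged one-liner for the opposite case; hbase.quota.enabled is the standard switch and is shown only to illustrate what those log lines are keyed on.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
      public static Configuration withQuotas() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true); // would start the RPC and space quota managers
        return conf;
      }
    }
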
2024-11-20T17:24:17,638 INFO [RS:0;d514dc944523:44015 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T17:24:17,641 INFO [RS:0;d514dc944523:44015 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d514dc944523%2C44015%2C1732123455293, suffix=, logDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/WALs/d514dc944523,44015,1732123455293, archiveDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/oldWALs, maxLogs=32 2024-11-20T17:24:17,658 DEBUG [RS:0;d514dc944523:44015 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/WALs/d514dc944523,44015,1732123455293/d514dc944523%2C44015%2C1732123455293.1732123457643, exclude list is [], retry=0 2024-11-20T17:24:17,663 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36709,DS-1fab594c-a880-4000-8925-7b1579b989ec,DISK] 2024-11-20T17:24:17,667 INFO [RS:0;d514dc944523:44015 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/WALs/d514dc944523,44015,1732123455293/d514dc944523%2C44015%2C1732123455293.1732123457643 2024-11-20T17:24:17,667 DEBUG [RS:0;d514dc944523:44015 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45017:45017)] 2024-11-20T17:24:17,764 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-20T17:24:17,764 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff 2024-11-20T17:24:17,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741833_1009 (size=32) 2024-11-20T17:24:18,175 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:24:18,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T17:24:18,181 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T17:24:18,181 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:18,182 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:24:18,182 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T17:24:18,185 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T17:24:18,185 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:18,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:24:18,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T17:24:18,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T17:24:18,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:18,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:24:18,191 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740 2024-11-20T17:24:18,192 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740 2024-11-20T17:24:18,195 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
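
The FSTableDescriptors and HRegion(7106) lines above print the full hbase:meta descriptor (IS_META, the MultiRowMutationEndpoint coprocessor, ROW_INDEX_V1 encoding per family). A minimal, hedged client-side sketch for inspecting that same descriptor on a running cluster, assuming standard HBase 2.x client classes:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class MetaDescriptorSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          System.out.println(meta); // same TABLE_ATTRIBUTES / family settings as logged above
        }
      }
    }
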
2024-11-20T17:24:18,197 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T17:24:18,201 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:24:18,202 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60024136, jitterRate=-0.10557067394256592}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:24:18,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T17:24:18,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T17:24:18,204 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T17:24:18,205 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T17:24:18,205 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T17:24:18,205 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T17:24:18,206 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T17:24:18,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T17:24:18,208 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T17:24:18,209 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-20T17:24:18,214 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T17:24:18,222 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T17:24:18,224 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T17:24:18,376 DEBUG [d514dc944523:35243 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T17:24:18,381 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:24:18,386 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d514dc944523,44015,1732123455293, state=OPENING 2024-11-20T17:24:18,391 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T17:24:18,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:18,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:18,394 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T17:24:18,394 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T17:24:18,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:24:18,570 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:18,572 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T17:24:18,575 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T17:24:18,586 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-20T17:24:18,587 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T17:24:18,587 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-20T17:24:18,590 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d514dc944523%2C44015%2C1732123455293.meta, suffix=.meta, logDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/WALs/d514dc944523,44015,1732123455293, archiveDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/oldWALs, maxLogs=32 2024-11-20T17:24:18,607 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/WALs/d514dc944523,44015,1732123455293/d514dc944523%2C44015%2C1732123455293.meta.1732123458592.meta, exclude list is [], retry=0 2024-11-20T17:24:18,611 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36709,DS-1fab594c-a880-4000-8925-7b1579b989ec,DISK] 2024-11-20T17:24:18,614 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/WALs/d514dc944523,44015,1732123455293/d514dc944523%2C44015%2C1732123455293.meta.1732123458592.meta 2024-11-20T17:24:18,615 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:45017:45017)] 2024-11-20T17:24:18,615 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:24:18,616 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T17:24:18,676 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T17:24:18,681 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T17:24:18,685 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T17:24:18,685 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:24:18,685 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-20T17:24:18,685 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-20T17:24:18,689 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T17:24:18,690 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T17:24:18,691 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:18,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:24:18,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T17:24:18,693 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T17:24:18,693 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:18,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:24:18,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T17:24:18,696 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T17:24:18,696 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:18,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T17:24:18,698 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740 2024-11-20T17:24:18,700 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740 2024-11-20T17:24:18,703 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:24:18,706 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T17:24:18,708 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70746754, jitterRate=0.05420878529548645}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:24:18,709 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T17:24:18,716 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732123458564 2024-11-20T17:24:18,728 DEBUG [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T17:24:18,728 INFO [RS_OPEN_META-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-20T17:24:18,729 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:24:18,731 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d514dc944523,44015,1732123455293, state=OPEN 2024-11-20T17:24:18,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T17:24:18,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T17:24:18,736 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T17:24:18,736 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T17:24:18,740 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T17:24:18,740 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=d514dc944523,44015,1732123455293 in 340 msec 2024-11-20T17:24:18,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T17:24:18,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure 
table=hbase:meta, region=1588230740, ASSIGN in 527 msec 2024-11-20T17:24:18,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4720 sec 2024-11-20T17:24:18,752 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732123458752, completionTime=-1 2024-11-20T17:24:18,752 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T17:24:18,752 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-20T17:24:18,791 DEBUG [hconnection-0x4e74fea5-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:18,793 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60998, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:18,803 INFO [master/d514dc944523:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-20T17:24:18,804 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732123518804 2024-11-20T17:24:18,804 INFO [master/d514dc944523:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732123578804 2024-11-20T17:24:18,804 INFO [master/d514dc944523:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 51 msec 2024-11-20T17:24:18,828 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,35243,1732123454567-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:18,828 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,35243,1732123454567-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:18,828 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,35243,1732123454567-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:18,829 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d514dc944523:35243, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:18,830 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T17:24:18,835 DEBUG [master/d514dc944523:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-20T17:24:18,838 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-20T17:24:18,839 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T17:24:18,846 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-20T17:24:18,849 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:24:18,850 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:18,852 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:24:18,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741835_1011 (size=358) 2024-11-20T17:24:19,266 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e60665fdde91447ed275607cc98db134, NAME => 'hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff 2024-11-20T17:24:19,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741836_1012 (size=42) 2024-11-20T17:24:19,677 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:24:19,677 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing e60665fdde91447ed275607cc98db134, disabling compactions & flushes 2024-11-20T17:24:19,677 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 2024-11-20T17:24:19,678 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 2024-11-20T17:24:19,678 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 
after waiting 0 ms 2024-11-20T17:24:19,678 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 2024-11-20T17:24:19,678 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 2024-11-20T17:24:19,678 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for e60665fdde91447ed275607cc98db134: 2024-11-20T17:24:19,680 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:24:19,687 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732123459681"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123459681"}]},"ts":"1732123459681"} 2024-11-20T17:24:19,714 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:24:19,717 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:24:19,720 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123459717"}]},"ts":"1732123459717"} 2024-11-20T17:24:19,725 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-20T17:24:19,731 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=e60665fdde91447ed275607cc98db134, ASSIGN}] 2024-11-20T17:24:19,734 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=e60665fdde91447ed275607cc98db134, ASSIGN 2024-11-20T17:24:19,736 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=e60665fdde91447ed275607cc98db134, ASSIGN; state=OFFLINE, location=d514dc944523,44015,1732123455293; forceNewPlan=false, retain=false 2024-11-20T17:24:19,887 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=e60665fdde91447ed275607cc98db134, regionState=OPENING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:24:19,891 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure e60665fdde91447ed275607cc98db134, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:24:20,045 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:20,051 INFO [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 2024-11-20T17:24:20,051 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => e60665fdde91447ed275607cc98db134, NAME => 'hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:24:20,052 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace e60665fdde91447ed275607cc98db134 2024-11-20T17:24:20,052 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:24:20,052 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for e60665fdde91447ed275607cc98db134 2024-11-20T17:24:20,052 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for e60665fdde91447ed275607cc98db134 2024-11-20T17:24:20,057 INFO [StoreOpener-e60665fdde91447ed275607cc98db134-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e60665fdde91447ed275607cc98db134 2024-11-20T17:24:20,060 INFO [StoreOpener-e60665fdde91447ed275607cc98db134-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e60665fdde91447ed275607cc98db134 columnFamilyName info 2024-11-20T17:24:20,060 DEBUG [StoreOpener-e60665fdde91447ed275607cc98db134-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:20,061 INFO [StoreOpener-e60665fdde91447ed275607cc98db134-1 {}] regionserver.HStore(327): Store=e60665fdde91447ed275607cc98db134/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:20,062 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/namespace/e60665fdde91447ed275607cc98db134 2024-11-20T17:24:20,063 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/namespace/e60665fdde91447ed275607cc98db134 2024-11-20T17:24:20,066 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for e60665fdde91447ed275607cc98db134 2024-11-20T17:24:20,070 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/namespace/e60665fdde91447ed275607cc98db134/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:24:20,071 INFO [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened e60665fdde91447ed275607cc98db134; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73987862, jitterRate=0.10250505805015564}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T17:24:20,072 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for e60665fdde91447ed275607cc98db134: 2024-11-20T17:24:20,074 INFO [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134., pid=6, masterSystemTime=1732123460045 2024-11-20T17:24:20,077 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 2024-11-20T17:24:20,078 INFO [RS_OPEN_PRIORITY_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 
2024-11-20T17:24:20,078 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=e60665fdde91447ed275607cc98db134, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:24:20,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T17:24:20,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure e60665fdde91447ed275607cc98db134, server=d514dc944523,44015,1732123455293 in 191 msec 2024-11-20T17:24:20,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T17:24:20,089 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=e60665fdde91447ed275607cc98db134, ASSIGN in 354 msec 2024-11-20T17:24:20,091 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:24:20,091 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123460091"}]},"ts":"1732123460091"} 2024-11-20T17:24:20,094 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-20T17:24:20,097 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:24:20,100 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2570 sec 2024-11-20T17:24:20,150 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-20T17:24:20,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-20T17:24:20,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:20,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:24:20,181 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-20T17:24:20,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T17:24:20,202 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 24 msec 2024-11-20T17:24:20,215 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-20T17:24:20,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T17:24:20,231 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-11-20T17:24:20,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-20T17:24:20,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-20T17:24:20,244 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.882sec 2024-11-20T17:24:20,246 INFO [master/d514dc944523:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T17:24:20,247 INFO [master/d514dc944523:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T17:24:20,248 INFO [master/d514dc944523:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T17:24:20,249 INFO [master/d514dc944523:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T17:24:20,249 INFO [master/d514dc944523:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T17:24:20,250 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,35243,1732123454567-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T17:24:20,250 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,35243,1732123454567-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T17:24:20,257 DEBUG [master/d514dc944523:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-20T17:24:20,259 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T17:24:20,259 INFO [master/d514dc944523:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d514dc944523,35243,1732123454567-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-20T17:24:20,346 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6169df5c to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a3c3fb3 2024-11-20T17:24:20,347 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-20T17:24:20,354 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@523a59d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:20,357 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-20T17:24:20,358 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-20T17:24:20,368 DEBUG [hconnection-0x46f0e06c-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:20,378 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32780, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:20,387 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=d514dc944523,35243,1732123454567 2024-11-20T17:24:20,403 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=252, ProcessCount=11, AvailableMemoryMB=6857 2024-11-20T17:24:20,414 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:24:20,417 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33210, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:24:20,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
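The TableDescriptorChecker warning just above fires because the test table carries a MEMSTORE_FLUSHSIZE of 131072 bytes (128 KB), far below the usual 128 MB default of hbase.hregion.memstore.flush.size. A minimal hedged sketch of how a descriptor ends up with such a value; the table name and size are taken from the log, not from the test source:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallFlushSizeDescriptor {
      public static TableDescriptor build() {
        // 131072 bytes = 128 KB; TableDescriptorChecker warns because this is far
        // below the 128 MB default of hbase.hregion.memstore.flush.size and would
        // cause very frequent flushing on a real table.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setMemStoreFlushSize(128 * 1024L)
            .build();
      }
    }
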
2024-11-20T17:24:20,429 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:24:20,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:20,434 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:24:20,434 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-20T17:24:20,434 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:20,436 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:24:20,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T17:24:20,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741837_1013 (size=963) 2024-11-20T17:24:20,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T17:24:20,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T17:24:20,870 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff 2024-11-20T17:24:20,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741838_1014 (size=53) 2024-11-20T17:24:21,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T17:24:21,281 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:24:21,281 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 895da877845d8163116b8248e2bc3ffc, disabling compactions & flushes 2024-11-20T17:24:21,281 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:21,281 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:21,281 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. after waiting 0 ms 2024-11-20T17:24:21,281 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:21,281 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
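The create 'TestAcidGuarantees' request logged above (families A, B and C with one version each, 64 KB blocks, and the table attribute hbase.hregion.compacting.memstore.type => ADAPTIVE) corresponds on the client side to roughly the following Admin call. This is a hedged reconstruction from the logged descriptor, not the test's actual code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestAcidGuarantees {
      public static void create(Admin admin) throws java.io.IOException {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level attribute seen in the logged descriptor: adaptive in-memory compaction.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] { "A", "B", "C" }) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)        // VERSIONS => '1' in the logged descriptor
              .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
              .build());
        }
        admin.createTable(builder.build());
      }
    }

This single call is what produces the CreateTableProcedure (pid=9) and the assignment subprocedures (pid=10, pid=11) that follow in the log.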
2024-11-20T17:24:21,281 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:21,283 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:24:21,284 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732123461283"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123461283"}]},"ts":"1732123461283"} 2024-11-20T17:24:21,287 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:24:21,288 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:24:21,289 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123461288"}]},"ts":"1732123461288"} 2024-11-20T17:24:21,291 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T17:24:21,295 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=895da877845d8163116b8248e2bc3ffc, ASSIGN}] 2024-11-20T17:24:21,297 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=895da877845d8163116b8248e2bc3ffc, ASSIGN 2024-11-20T17:24:21,298 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=895da877845d8163116b8248e2bc3ffc, ASSIGN; state=OFFLINE, location=d514dc944523,44015,1732123455293; forceNewPlan=false, retain=false 2024-11-20T17:24:21,449 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=895da877845d8163116b8248e2bc3ffc, regionState=OPENING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:24:21,453 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:24:21,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T17:24:21,606 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:21,612 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:21,613 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:24:21,613 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:21,613 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:24:21,613 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:21,614 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:21,616 INFO [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:21,619 INFO [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:24:21,619 INFO [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 895da877845d8163116b8248e2bc3ffc columnFamilyName A 2024-11-20T17:24:21,619 DEBUG [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:21,620 INFO [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] regionserver.HStore(327): Store=895da877845d8163116b8248e2bc3ffc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:21,620 INFO [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:21,622 INFO [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:24:21,622 INFO [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 895da877845d8163116b8248e2bc3ffc columnFamilyName B 2024-11-20T17:24:21,622 DEBUG [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:21,623 INFO [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] regionserver.HStore(327): Store=895da877845d8163116b8248e2bc3ffc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:21,623 INFO [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:21,625 INFO [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:24:21,625 INFO [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 895da877845d8163116b8248e2bc3ffc columnFamilyName C 2024-11-20T17:24:21,625 DEBUG [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:21,626 INFO [StoreOpener-895da877845d8163116b8248e2bc3ffc-1 {}] regionserver.HStore(327): Store=895da877845d8163116b8248e2bc3ffc/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:21,626 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:21,628 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:21,628 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:21,631 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:24:21,633 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:21,636 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:24:21,637 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 895da877845d8163116b8248e2bc3ffc; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66274632, jitterRate=-0.012431025505065918}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:24:21,638 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:21,640 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., pid=11, masterSystemTime=1732123461606 2024-11-20T17:24:21,642 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:21,643 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
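The FlushLargeStoresPolicy line above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the TestAcidGuarantees descriptor, so the per-family lower bound falls back to the region flush size divided by the number of families (16 MB here). If a fixed bound were wanted, it could be set on the table descriptor; a hedged sketch, with the property name taken from the log message and the 8 MB value purely illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SetPerFamilyFlushLowerBound {
      public static void apply(Admin admin, TableName table) throws java.io.IOException {
        TableDescriptor current = admin.getDescriptor(table);
        TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
            // Property name as reported by FlushLargeStoresPolicy; 8 MB is illustrative only.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(8L * 1024 * 1024))
            .build();
        admin.modifyTable(updated);
      }
    }
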
2024-11-20T17:24:21,643 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=895da877845d8163116b8248e2bc3ffc, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:24:21,650 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-20T17:24:21,650 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 in 195 msec 2024-11-20T17:24:21,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-20T17:24:21,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=895da877845d8163116b8248e2bc3ffc, ASSIGN in 355 msec 2024-11-20T17:24:21,655 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:24:21,655 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123461655"}]},"ts":"1732123461655"} 2024-11-20T17:24:21,658 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T17:24:21,661 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:24:21,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2310 sec 2024-11-20T17:24:22,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T17:24:22,583 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-20T17:24:22,589 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x038196d7 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e59596a 2024-11-20T17:24:22,592 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30640414, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:22,595 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:22,597 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:22,600 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:24:22,601 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33222, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:24:22,608 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x28808bb9 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2cac4303 2024-11-20T17:24:22,612 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@536a4a58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:22,613 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x774bf929 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39b10898 2024-11-20T17:24:22,617 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18751c86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:22,618 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x54af89df to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d7115de 2024-11-20T17:24:22,621 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2dd0bbda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:22,622 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x251efa5e to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@30d4d4c6 2024-11-20T17:24:22,626 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c57419f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:22,627 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x67f7d3d3 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@54c943d 2024-11-20T17:24:22,630 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@435176b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:22,632 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4bf8e82a to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f0c7188 2024-11-20T17:24:22,635 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e957ecd, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:22,636 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3ba01639 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@475ca0f4 2024-11-20T17:24:22,639 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22daddc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:22,640 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x24890c79 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50c9c1d1 2024-11-20T17:24:22,643 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39028e20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:22,644 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51cab508 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4f1331a9 2024-11-20T17:24:22,647 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@624dc5e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:22,652 DEBUG [hconnection-0x169ff269-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:22,652 DEBUG [hconnection-0x4b969ab9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:22,657 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:22,658 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32812, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:22,658 DEBUG [hconnection-0x7c3cfe41-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:22,662 DEBUG [hconnection-0x2f204f5e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:22,663 DEBUG [hconnection-0x32d9f7e6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:22,663 DEBUG [hconnection-0x47be79fd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:22,663 DEBUG [hconnection-0x52bb944e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-20T17:24:22,663 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:22,664 DEBUG [hconnection-0x3d2cc084-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:22,664 DEBUG [hconnection-0x522bb58c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:22,666 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:22,667 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32822, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:22,667 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32832, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:22,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-20T17:24:22,670 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54148, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:22,672 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54156, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:22,673 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:22,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T17:24:22,676 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:22,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:22,681 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54166, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:22,683 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:22,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:24:22,760 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:22,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:22,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:22,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:22,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:22,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:22,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T17:24:22,841 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:22,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T17:24:22,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:22,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:22,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:22,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:22,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:22,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/c4511284fa1943c9ac72520dcc9cb24b is 50, key is test_row_0/A:col10/1732123462733/Put/seqid=0 2024-11-20T17:24:22,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:22,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741839_1015 (size=12001) 2024-11-20T17:24:22,924 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/c4511284fa1943c9ac72520dcc9cb24b 2024-11-20T17:24:22,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:22,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123522909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:22,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:22,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123522919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:22,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:22,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123522919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:22,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:22,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123522930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:22,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:22,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123522931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:22,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T17:24:23,038 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:23,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T17:24:23,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/35f996dcb0774246a174b37bf3006a77 is 50, key is test_row_0/B:col10/1732123462733/Put/seqid=0 2024-11-20T17:24:23,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:23,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:23,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:23,047 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:23,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:23,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:23,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123523070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123523071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123523072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123523073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123523072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741840_1016 (size=12001) 2024-11-20T17:24:23,201 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:23,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T17:24:23,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:23,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:23,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:23,205 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:23,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:23,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:23,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123523278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T17:24:23,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123523279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123523281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123523281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123523283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,359 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:23,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T17:24:23,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:23,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:23,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:23,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:23,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:23,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:23,454 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T17:24:23,456 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T17:24:23,457 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-20T17:24:23,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/35f996dcb0774246a174b37bf3006a77 2024-11-20T17:24:23,516 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:23,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T17:24:23,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:23,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:23,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:23,517 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:23,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:23,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:23,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/5da896eac12142d6bd0d628f2b22afdc is 50, key is test_row_0/C:col10/1732123462733/Put/seqid=0 2024-11-20T17:24:23,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741841_1017 (size=12001) 2024-11-20T17:24:23,535 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/5da896eac12142d6bd0d628f2b22afdc 2024-11-20T17:24:23,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/c4511284fa1943c9ac72520dcc9cb24b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/c4511284fa1943c9ac72520dcc9cb24b 2024-11-20T17:24:23,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/c4511284fa1943c9ac72520dcc9cb24b, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T17:24:23,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/35f996dcb0774246a174b37bf3006a77 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/35f996dcb0774246a174b37bf3006a77 2024-11-20T17:24:23,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/35f996dcb0774246a174b37bf3006a77, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T17:24:23,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/5da896eac12142d6bd0d628f2b22afdc as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/5da896eac12142d6bd0d628f2b22afdc 2024-11-20T17:24:23,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123523588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123523588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123523588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123523589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123523589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:23,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/5da896eac12142d6bd0d628f2b22afdc, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T17:24:23,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 895da877845d8163116b8248e2bc3ffc in 875ms, sequenceid=13, compaction requested=false 2024-11-20T17:24:23,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:23,671 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:23,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T17:24:23,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:23,673 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:24:23,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:23,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:23,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:23,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:23,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:23,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:23,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/110e6da72e2245988fe11477f245e351 is 50, key is test_row_0/A:col10/1732123462918/Put/seqid=0 2024-11-20T17:24:23,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741842_1018 (size=12001) 2024-11-20T17:24:23,727 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/110e6da72e2245988fe11477f245e351 2024-11-20T17:24:23,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/8b42c94200cc442c8d208a1458b0957f is 50, key is test_row_0/B:col10/1732123462918/Put/seqid=0 2024-11-20T17:24:23,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741843_1019 (size=12001) 2024-11-20T17:24:23,771 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), 
to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/8b42c94200cc442c8d208a1458b0957f 2024-11-20T17:24:23,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T17:24:23,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/a7fe6e66ad134dd4b773bdeaeb1bfd3d is 50, key is test_row_0/C:col10/1732123462918/Put/seqid=0 2024-11-20T17:24:23,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741844_1020 (size=12001) 2024-11-20T17:24:23,820 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/a7fe6e66ad134dd4b773bdeaeb1bfd3d 2024-11-20T17:24:23,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/110e6da72e2245988fe11477f245e351 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/110e6da72e2245988fe11477f245e351 2024-11-20T17:24:23,858 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/110e6da72e2245988fe11477f245e351, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T17:24:23,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/8b42c94200cc442c8d208a1458b0957f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/8b42c94200cc442c8d208a1458b0957f 2024-11-20T17:24:23,872 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/8b42c94200cc442c8d208a1458b0957f, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T17:24:23,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/a7fe6e66ad134dd4b773bdeaeb1bfd3d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a7fe6e66ad134dd4b773bdeaeb1bfd3d 2024-11-20T17:24:23,885 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a7fe6e66ad134dd4b773bdeaeb1bfd3d, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T17:24:23,888 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 895da877845d8163116b8248e2bc3ffc in 214ms, sequenceid=37, compaction requested=false 2024-11-20T17:24:23,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:23,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:23,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-20T17:24:23,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-20T17:24:23,895 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-20T17:24:23,896 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2130 sec 2024-11-20T17:24:23,899 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.2310 sec 2024-11-20T17:24:24,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:24:24,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:24,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:24,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:24,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:24,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:24,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T17:24:24,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:24,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/73e6ef050f224b3687b038fd88e053d5 is 50, key is test_row_0/A:col10/1732123464129/Put/seqid=0 2024-11-20T17:24:24,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741845_1021 (size=19021) 2024-11-20T17:24:24,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/73e6ef050f224b3687b038fd88e053d5 2024-11-20T17:24:24,198 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T17:24:24,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/1cbaa31bdafa49c3a214af353e4cb29b is 50, key is test_row_0/B:col10/1732123464129/Put/seqid=0 2024-11-20T17:24:24,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123524202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123524203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123524209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123524210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123524206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741846_1022 (size=12001) 2024-11-20T17:24:24,238 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/1cbaa31bdafa49c3a214af353e4cb29b 2024-11-20T17:24:24,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/ed11ecf56daa47f3af449ac7fb19b797 is 50, key is test_row_0/C:col10/1732123464129/Put/seqid=0 2024-11-20T17:24:24,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741847_1023 (size=12001) 2024-11-20T17:24:24,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123524322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123524323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123524330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123524331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123524334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123524530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123524537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123524539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123524540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123524542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/ed11ecf56daa47f3af449ac7fb19b797 2024-11-20T17:24:24,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/73e6ef050f224b3687b038fd88e053d5 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/73e6ef050f224b3687b038fd88e053d5 2024-11-20T17:24:24,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/73e6ef050f224b3687b038fd88e053d5, entries=300, sequenceid=48, filesize=18.6 K 2024-11-20T17:24:24,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/1cbaa31bdafa49c3a214af353e4cb29b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/1cbaa31bdafa49c3a214af353e4cb29b 2024-11-20T17:24:24,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/1cbaa31bdafa49c3a214af353e4cb29b, entries=150, sequenceid=48, filesize=11.7 K 2024-11-20T17:24:24,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/ed11ecf56daa47f3af449ac7fb19b797 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ed11ecf56daa47f3af449ac7fb19b797 2024-11-20T17:24:24,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ed11ecf56daa47f3af449ac7fb19b797, entries=150, sequenceid=48, filesize=11.7 K 2024-11-20T17:24:24,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 895da877845d8163116b8248e2bc3ffc in 629ms, sequenceid=48, compaction requested=true 2024-11-20T17:24:24,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:24,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:24,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:24,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:24,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:24,768 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:24,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:24,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:24:24,769 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:24,773 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:24,773 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 43023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:24,775 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/A is initiating minor compaction (all files) 2024-11-20T17:24:24,775 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/B is initiating minor compaction (all files) 2024-11-20T17:24:24,775 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/B in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:24,775 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/A in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:24,775 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/35f996dcb0774246a174b37bf3006a77, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/8b42c94200cc442c8d208a1458b0957f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/1cbaa31bdafa49c3a214af353e4cb29b] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=35.2 K 2024-11-20T17:24:24,775 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/c4511284fa1943c9ac72520dcc9cb24b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/110e6da72e2245988fe11477f245e351, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/73e6ef050f224b3687b038fd88e053d5] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=42.0 K 2024-11-20T17:24:24,778 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 35f996dcb0774246a174b37bf3006a77, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732123462680 2024-11-20T17:24:24,780 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4511284fa1943c9ac72520dcc9cb24b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732123462680 2024-11-20T17:24:24,781 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b42c94200cc442c8d208a1458b0957f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732123462901 2024-11-20T17:24:24,781 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting 110e6da72e2245988fe11477f245e351, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732123462901 2024-11-20T17:24:24,782 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cbaa31bdafa49c3a214af353e4cb29b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732123464126 2024-11-20T17:24:24,782 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73e6ef050f224b3687b038fd88e053d5, keycount=300, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732123464117 2024-11-20T17:24:24,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T17:24:24,794 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-20T17:24:24,797 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:24,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-20T17:24:24,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T17:24:24,801 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:24,803 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:24,803 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:24,828 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#A#compaction#10 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:24,828 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#B#compaction#9 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:24,829 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/d3a55a676f6a43d6b39e3266be49f757 is 50, key is test_row_0/A:col10/1732123464129/Put/seqid=0 2024-11-20T17:24:24,829 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/41cb0279f5034b1fbddc9e78dea3942e is 50, key is test_row_0/B:col10/1732123464129/Put/seqid=0 2024-11-20T17:24:24,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:24,849 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:24:24,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:24,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:24,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:24,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:24,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:24,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:24,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741848_1024 (size=12104) 2024-11-20T17:24:24,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123524864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123524865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741849_1025 (size=12104) 2024-11-20T17:24:24,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123524868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123524874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,887 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/d3a55a676f6a43d6b39e3266be49f757 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d3a55a676f6a43d6b39e3266be49f757 2024-11-20T17:24:24,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123524874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/2a622620e5d8451d87a47ee7301752cd is 50, key is test_row_0/A:col10/1732123464202/Put/seqid=0 2024-11-20T17:24:24,900 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/41cb0279f5034b1fbddc9e78dea3942e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/41cb0279f5034b1fbddc9e78dea3942e 2024-11-20T17:24:24,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T17:24:24,915 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/A of 895da877845d8163116b8248e2bc3ffc into d3a55a676f6a43d6b39e3266be49f757(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:24,915 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:24,915 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/A, priority=13, startTime=1732123464765; duration=0sec 2024-11-20T17:24:24,915 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:24,916 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:A 2024-11-20T17:24:24,916 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:24,919 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:24,919 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/C is initiating minor compaction (all files) 2024-11-20T17:24:24,919 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/C in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:24,919 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/5da896eac12142d6bd0d628f2b22afdc, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a7fe6e66ad134dd4b773bdeaeb1bfd3d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ed11ecf56daa47f3af449ac7fb19b797] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=35.2 K 2024-11-20T17:24:24,920 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5da896eac12142d6bd0d628f2b22afdc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732123462680 2024-11-20T17:24:24,921 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7fe6e66ad134dd4b773bdeaeb1bfd3d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732123462901 2024-11-20T17:24:24,923 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed11ecf56daa47f3af449ac7fb19b797, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732123464126 2024-11-20T17:24:24,923 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction 
of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/B of 895da877845d8163116b8248e2bc3ffc into 41cb0279f5034b1fbddc9e78dea3942e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:24,923 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:24,923 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/B, priority=13, startTime=1732123464768; duration=0sec 2024-11-20T17:24:24,923 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:24,924 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:B 2024-11-20T17:24:24,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741850_1026 (size=14341) 2024-11-20T17:24:24,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/2a622620e5d8451d87a47ee7301752cd 2024-11-20T17:24:24,960 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:24,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T17:24:24,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:24,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:24,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:24,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:24,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:24,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:24,965 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#C#compaction#12 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:24,968 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/bfbd54f1d73d4b9998c43771469c98ba is 50, key is test_row_0/C:col10/1732123464129/Put/seqid=0 2024-11-20T17:24:24,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/c1eec70ce7b149b4b4d54856dc98e480 is 50, key is test_row_0/B:col10/1732123464202/Put/seqid=0 2024-11-20T17:24:24,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123524978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123524979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123524987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123524988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:24,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123524989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:24,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741851_1027 (size=12104) 2024-11-20T17:24:25,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741852_1028 (size=12001) 2024-11-20T17:24:25,003 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/c1eec70ce7b149b4b4d54856dc98e480 2024-11-20T17:24:25,011 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/bfbd54f1d73d4b9998c43771469c98ba as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/bfbd54f1d73d4b9998c43771469c98ba 2024-11-20T17:24:25,027 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/C of 895da877845d8163116b8248e2bc3ffc into bfbd54f1d73d4b9998c43771469c98ba(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:25,027 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:25,027 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/C, priority=13, startTime=1732123464768; duration=0sec 2024-11-20T17:24:25,028 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:25,028 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:C 2024-11-20T17:24:25,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/7291404716754ae3848d96dddffa5895 is 50, key is test_row_0/C:col10/1732123464202/Put/seqid=0 2024-11-20T17:24:25,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741853_1029 (size=12001) 2024-11-20T17:24:25,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T17:24:25,059 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T17:24:25,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/7291404716754ae3848d96dddffa5895 2024-11-20T17:24:25,062 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-20T17:24:25,062 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-20T17:24:25,064 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T17:24:25,064 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T17:24:25,065 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T17:24:25,065 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T17:24:25,067 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T17:24:25,067 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T17:24:25,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/2a622620e5d8451d87a47ee7301752cd as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/2a622620e5d8451d87a47ee7301752cd 2024-11-20T17:24:25,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/2a622620e5d8451d87a47ee7301752cd, entries=200, sequenceid=74, filesize=14.0 K 2024-11-20T17:24:25,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/c1eec70ce7b149b4b4d54856dc98e480 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c1eec70ce7b149b4b4d54856dc98e480 2024-11-20T17:24:25,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c1eec70ce7b149b4b4d54856dc98e480, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T17:24:25,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T17:24:25,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/7291404716754ae3848d96dddffa5895 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7291404716754ae3848d96dddffa5895 2024-11-20T17:24:25,115 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:25,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T17:24:25,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:25,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
as already flushing 2024-11-20T17:24:25,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:25,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:25,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:25,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:25,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7291404716754ae3848d96dddffa5895, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T17:24:25,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 895da877845d8163116b8248e2bc3ffc in 279ms, sequenceid=74, compaction requested=false 2024-11-20T17:24:25,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:25,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:25,193 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:24:25,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:25,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:25,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:25,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:25,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 
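The pid=15 failure traced above is not data loss: a master-dispatched FlushRegionCallable arrived while MemStoreFlusher.0 was already flushing 895da877845d8163116b8248e2bc3ffc, so the region server rejects it with "NOT flushing ... as already flushing", reports the IOException back, and the master re-dispatches the same procedure (pid=15 appears again at 17:24:25,272 and 17:24:25,430 below). The exact trigger used by the test is not visible in this log; the following sketch is an assumed example of the kind of client call that produces such a remote flush procedure.

// Illustrative only: an explicit flush request that the master turns into a
// FlushRegionCallable on the region server, where it can collide with an
// in-progress MemStoreFlusher run and be retried, as seen above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TriggerFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees")); // queues a flush procedure on the master
    }
  }
}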
2024-11-20T17:24:25,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:25,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/98aaeae59ff242ccbedba70e3a0db508 is 50, key is test_row_0/A:col10/1732123465189/Put/seqid=0 2024-11-20T17:24:25,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741854_1030 (size=9657) 2024-11-20T17:24:25,235 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/98aaeae59ff242ccbedba70e3a0db508 2024-11-20T17:24:25,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123525240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123525243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123525243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123525244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123525247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/0e000fcbd53a488a8579f5876681e30c is 50, key is test_row_0/B:col10/1732123465189/Put/seqid=0 2024-11-20T17:24:25,272 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:25,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T17:24:25,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:25,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:25,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:25,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:25,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:25,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:25,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741855_1031 (size=9657) 2024-11-20T17:24:25,305 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/0e000fcbd53a488a8579f5876681e30c 2024-11-20T17:24:25,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/a5ecc0ee825b4ebb8278152c405b81f6 is 50, key is test_row_0/C:col10/1732123465189/Put/seqid=0 2024-11-20T17:24:25,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123525353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123525352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123525354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123525354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123525355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741856_1032 (size=9657) 2024-11-20T17:24:25,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/a5ecc0ee825b4ebb8278152c405b81f6 2024-11-20T17:24:25,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/98aaeae59ff242ccbedba70e3a0db508 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/98aaeae59ff242ccbedba70e3a0db508 2024-11-20T17:24:25,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/98aaeae59ff242ccbedba70e3a0db508, entries=100, sequenceid=89, filesize=9.4 K 2024-11-20T17:24:25,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/0e000fcbd53a488a8579f5876681e30c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/0e000fcbd53a488a8579f5876681e30c 2024-11-20T17:24:25,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/0e000fcbd53a488a8579f5876681e30c, entries=100, sequenceid=89, filesize=9.4 K 2024-11-20T17:24:25,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/a5ecc0ee825b4ebb8278152c405b81f6 as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a5ecc0ee825b4ebb8278152c405b81f6 2024-11-20T17:24:25,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T17:24:25,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a5ecc0ee825b4ebb8278152c405b81f6, entries=100, sequenceid=89, filesize=9.4 K 2024-11-20T17:24:25,425 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 895da877845d8163116b8248e2bc3ffc in 232ms, sequenceid=89, compaction requested=true 2024-11-20T17:24:25,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:25,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:25,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:25,426 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:25,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:25,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:25,426 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:25,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:25,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:25,428 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:25,428 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/B is initiating minor compaction (all files) 2024-11-20T17:24:25,428 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/B in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:25,428 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36102 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:25,429 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/A is initiating minor compaction (all files) 2024-11-20T17:24:25,429 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/41cb0279f5034b1fbddc9e78dea3942e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c1eec70ce7b149b4b4d54856dc98e480, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/0e000fcbd53a488a8579f5876681e30c] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=33.0 K 2024-11-20T17:24:25,429 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/A in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:25,429 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d3a55a676f6a43d6b39e3266be49f757, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/2a622620e5d8451d87a47ee7301752cd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/98aaeae59ff242ccbedba70e3a0db508] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=35.3 K 2024-11-20T17:24:25,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:25,430 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T17:24:25,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:25,430 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:24:25,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:25,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:25,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:25,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:25,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:25,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:25,432 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3a55a676f6a43d6b39e3266be49f757, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732123464126 2024-11-20T17:24:25,433 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a622620e5d8451d87a47ee7301752cd, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732123464196 2024-11-20T17:24:25,433 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 41cb0279f5034b1fbddc9e78dea3942e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732123464126 2024-11-20T17:24:25,434 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98aaeae59ff242ccbedba70e3a0db508, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123465189 2024-11-20T17:24:25,435 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting c1eec70ce7b149b4b4d54856dc98e480, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732123464196 2024-11-20T17:24:25,437 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e000fcbd53a488a8579f5876681e30c, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123465189 2024-11-20T17:24:25,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/1cbcbdd91c4d45188863b7688a4f271d is 50, key is test_row_0/A:col10/1732123465237/Put/seqid=0 
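The numbers in the two selections above are sums of the per-file sizes logged with each Compacting entry: store A's "3 files of size 36102" is 11.8 K + 14.0 K + 9.4 K ≈ 35.3 K, and store B's "3 files of size 33762" is 11.8 K + 11.7 K + 9.4 K ≈ 33.0 K, matching the totalSize values on the corresponding "Starting compaction" lines. With all three eligible files selected and none excluded, each run is logged as a "minor compaction (all files)" and rewrites the store into a single output HFile (8505e4f338af448caa18e8b72e5870d3 at 11.9 K for store B, below).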
2024-11-20T17:24:25,482 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#A#compaction#19 average throughput is 0.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:25,483 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/52bd35798d0445b5a216f04b9e9bf3c7 is 50, key is test_row_0/A:col10/1732123465189/Put/seqid=0 2024-11-20T17:24:25,485 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#B#compaction#20 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:25,486 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/8505e4f338af448caa18e8b72e5870d3 is 50, key is test_row_0/B:col10/1732123465189/Put/seqid=0 2024-11-20T17:24:25,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741857_1033 (size=12001) 2024-11-20T17:24:25,494 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/1cbcbdd91c4d45188863b7688a4f271d 2024-11-20T17:24:25,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741859_1035 (size=12207) 2024-11-20T17:24:25,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741858_1034 (size=12207) 2024-11-20T17:24:25,542 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/8505e4f338af448caa18e8b72e5870d3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/8505e4f338af448caa18e8b72e5870d3 2024-11-20T17:24:25,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/14451cf7d6a948ea874951526cdd99d7 is 50, key is test_row_0/B:col10/1732123465237/Put/seqid=0 2024-11-20T17:24:25,559 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/B of 895da877845d8163116b8248e2bc3ffc into 8505e4f338af448caa18e8b72e5870d3(size=11.9 K), total size 
for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:25,559 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:25,560 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/B, priority=13, startTime=1732123465426; duration=0sec 2024-11-20T17:24:25,560 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:25,560 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:B 2024-11-20T17:24:25,560 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:25,563 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:25,563 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/C is initiating minor compaction (all files) 2024-11-20T17:24:25,563 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/C in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:25,563 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/bfbd54f1d73d4b9998c43771469c98ba, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7291404716754ae3848d96dddffa5895, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a5ecc0ee825b4ebb8278152c405b81f6] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=33.0 K 2024-11-20T17:24:25,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:25,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
as already flushing 2024-11-20T17:24:25,566 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting bfbd54f1d73d4b9998c43771469c98ba, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732123464126 2024-11-20T17:24:25,567 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 7291404716754ae3848d96dddffa5895, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732123464196 2024-11-20T17:24:25,568 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting a5ecc0ee825b4ebb8278152c405b81f6, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123465189 2024-11-20T17:24:25,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741860_1036 (size=12001) 2024-11-20T17:24:25,587 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#C#compaction#22 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:25,588 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/7536c55320ec432db3a953cb38c0c086 is 50, key is test_row_0/C:col10/1732123465189/Put/seqid=0 2024-11-20T17:24:25,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123525582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123525582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123525583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123525586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123525591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741861_1037 (size=12207) 2024-11-20T17:24:25,636 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/7536c55320ec432db3a953cb38c0c086 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7536c55320ec432db3a953cb38c0c086 2024-11-20T17:24:25,649 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/C of 895da877845d8163116b8248e2bc3ffc into 7536c55320ec432db3a953cb38c0c086(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:25,649 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:25,649 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/C, priority=13, startTime=1732123465426; duration=0sec 2024-11-20T17:24:25,650 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:25,650 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:C 2024-11-20T17:24:25,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123525693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123525695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123525695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123525695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123525695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123525898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123525898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123525900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123525904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:25,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123525907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:25,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T17:24:25,950 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/52bd35798d0445b5a216f04b9e9bf3c7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/52bd35798d0445b5a216f04b9e9bf3c7 2024-11-20T17:24:25,963 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/A of 895da877845d8163116b8248e2bc3ffc into 52bd35798d0445b5a216f04b9e9bf3c7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:25,963 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:25,963 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/A, priority=13, startTime=1732123465426; duration=0sec 2024-11-20T17:24:25,963 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:25,964 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:A 2024-11-20T17:24:25,975 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/14451cf7d6a948ea874951526cdd99d7 2024-11-20T17:24:25,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/741a072ba027498a812679056e279012 is 50, key is test_row_0/C:col10/1732123465237/Put/seqid=0 2024-11-20T17:24:26,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741862_1038 (size=12001) 2024-11-20T17:24:26,017 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/741a072ba027498a812679056e279012 2024-11-20T17:24:26,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/1cbcbdd91c4d45188863b7688a4f271d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/1cbcbdd91c4d45188863b7688a4f271d 2024-11-20T17:24:26,041 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/1cbcbdd91c4d45188863b7688a4f271d, entries=150, sequenceid=113, filesize=11.7 K 2024-11-20T17:24:26,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/14451cf7d6a948ea874951526cdd99d7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/14451cf7d6a948ea874951526cdd99d7 2024-11-20T17:24:26,054 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/14451cf7d6a948ea874951526cdd99d7, entries=150, sequenceid=113, filesize=11.7 K 2024-11-20T17:24:26,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/741a072ba027498a812679056e279012 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/741a072ba027498a812679056e279012 2024-11-20T17:24:26,064 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/741a072ba027498a812679056e279012, entries=150, sequenceid=113, filesize=11.7 K 2024-11-20T17:24:26,067 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 895da877845d8163116b8248e2bc3ffc in 636ms, sequenceid=113, compaction requested=false 2024-11-20T17:24:26,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:26,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:26,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-20T17:24:26,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-20T17:24:26,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-20T17:24:26,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2670 sec 2024-11-20T17:24:26,077 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.2780 sec 2024-11-20T17:24:26,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:26,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T17:24:26,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:26,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:26,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:26,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:26,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:26,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:26,217 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/8bc3069e9f56491fad6bb59cc415f18b is 50, key is test_row_0/A:col10/1732123466204/Put/seqid=0 2024-11-20T17:24:26,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741863_1039 (size=12051) 2024-11-20T17:24:26,238 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/8bc3069e9f56491fad6bb59cc415f18b 2024-11-20T17:24:26,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123526240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123526243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/5ab7e4b084be4953b4c1100fc836867b is 50, key is test_row_0/B:col10/1732123466204/Put/seqid=0 2024-11-20T17:24:26,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123526247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123526249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123526250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741864_1040 (size=12051) 2024-11-20T17:24:26,274 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/5ab7e4b084be4953b4c1100fc836867b 2024-11-20T17:24:26,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/96eecc685e694824a5ed96e1b7d2aa7f is 50, key is test_row_0/C:col10/1732123466204/Put/seqid=0 2024-11-20T17:24:26,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741865_1041 (size=12051) 2024-11-20T17:24:26,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/96eecc685e694824a5ed96e1b7d2aa7f 2024-11-20T17:24:26,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123526354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123526356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123526357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123526358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/8bc3069e9f56491fad6bb59cc415f18b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/8bc3069e9f56491fad6bb59cc415f18b 2024-11-20T17:24:26,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123526358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/8bc3069e9f56491fad6bb59cc415f18b, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T17:24:26,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/5ab7e4b084be4953b4c1100fc836867b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/5ab7e4b084be4953b4c1100fc836867b 2024-11-20T17:24:26,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/5ab7e4b084be4953b4c1100fc836867b, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T17:24:26,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/96eecc685e694824a5ed96e1b7d2aa7f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/96eecc685e694824a5ed96e1b7d2aa7f 2024-11-20T17:24:26,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/96eecc685e694824a5ed96e1b7d2aa7f, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T17:24:26,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 895da877845d8163116b8248e2bc3ffc in 208ms, sequenceid=131, compaction requested=true 2024-11-20T17:24:26,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:26,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:26,415 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:26,415 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:26,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:26,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:26,416 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:26,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:26,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:26,418 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:26,418 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/A is initiating minor compaction (all files) 2024-11-20T17:24:26,418 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/A in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:26,418 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/52bd35798d0445b5a216f04b9e9bf3c7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/1cbcbdd91c4d45188863b7688a4f271d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/8bc3069e9f56491fad6bb59cc415f18b] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=35.4 K 2024-11-20T17:24:26,419 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:26,419 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/B is initiating minor compaction (all files) 2024-11-20T17:24:26,419 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52bd35798d0445b5a216f04b9e9bf3c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123464196 2024-11-20T17:24:26,420 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/B in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:26,420 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/8505e4f338af448caa18e8b72e5870d3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/14451cf7d6a948ea874951526cdd99d7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/5ab7e4b084be4953b4c1100fc836867b] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=35.4 K 2024-11-20T17:24:26,421 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cbcbdd91c4d45188863b7688a4f271d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732123465231 2024-11-20T17:24:26,421 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 8505e4f338af448caa18e8b72e5870d3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123464196 2024-11-20T17:24:26,422 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8bc3069e9f56491fad6bb59cc415f18b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123465578 2024-11-20T17:24:26,423 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 14451cf7d6a948ea874951526cdd99d7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732123465231 2024-11-20T17:24:26,424 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ab7e4b084be4953b4c1100fc836867b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123465578 2024-11-20T17:24:26,441 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#A#compaction#27 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:26,442 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/9166646d83ff44bf804bc9a645f65564 is 50, key is test_row_0/A:col10/1732123466204/Put/seqid=0 2024-11-20T17:24:26,450 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#B#compaction#28 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:26,451 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/7eefeedcba5d486dabb56e9612439365 is 50, key is test_row_0/B:col10/1732123466204/Put/seqid=0 2024-11-20T17:24:26,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741866_1042 (size=12359) 2024-11-20T17:24:26,474 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/9166646d83ff44bf804bc9a645f65564 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/9166646d83ff44bf804bc9a645f65564 2024-11-20T17:24:26,487 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/A of 895da877845d8163116b8248e2bc3ffc into 9166646d83ff44bf804bc9a645f65564(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:26,487 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:26,487 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/A, priority=13, startTime=1732123466415; duration=0sec 2024-11-20T17:24:26,487 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:26,487 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:A 2024-11-20T17:24:26,488 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:26,490 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:26,491 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/C is initiating minor compaction (all files) 2024-11-20T17:24:26,491 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/C in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:26,491 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7536c55320ec432db3a953cb38c0c086, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/741a072ba027498a812679056e279012, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/96eecc685e694824a5ed96e1b7d2aa7f] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=35.4 K 2024-11-20T17:24:26,492 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7536c55320ec432db3a953cb38c0c086, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123464196 2024-11-20T17:24:26,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741867_1043 (size=12359) 2024-11-20T17:24:26,494 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 741a072ba027498a812679056e279012, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1732123465231 2024-11-20T17:24:26,494 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96eecc685e694824a5ed96e1b7d2aa7f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123465578 2024-11-20T17:24:26,509 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#C#compaction#29 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:26,510 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/9c49657710234b0681b647b9d7a91fb4 is 50, key is test_row_0/C:col10/1732123466204/Put/seqid=0 2024-11-20T17:24:26,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741868_1044 (size=12359) 2024-11-20T17:24:26,543 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/9c49657710234b0681b647b9d7a91fb4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/9c49657710234b0681b647b9d7a91fb4 2024-11-20T17:24:26,554 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/C of 895da877845d8163116b8248e2bc3ffc into 9c49657710234b0681b647b9d7a91fb4(size=12.1 K), total size for store is 12.1 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:26,554 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:26,554 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/C, priority=13, startTime=1732123466416; duration=0sec 2024-11-20T17:24:26,555 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:26,555 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:C 2024-11-20T17:24:26,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:24:26,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:26,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:26,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:26,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:26,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:26,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:26,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:26,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/31746a004a254e3182af394d517df7ca is 50, key is test_row_0/A:col10/1732123466565/Put/seqid=0 2024-11-20T17:24:26,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123526585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123526585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123526585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123526587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123526587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741869_1045 (size=14541) 2024-11-20T17:24:26,609 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/31746a004a254e3182af394d517df7ca 2024-11-20T17:24:26,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/ad1e2ebc371c49e0af292510037500fb is 50, key is test_row_0/B:col10/1732123466565/Put/seqid=0 2024-11-20T17:24:26,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741870_1046 (size=12151) 2024-11-20T17:24:26,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/ad1e2ebc371c49e0af292510037500fb 2024-11-20T17:24:26,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/ddfe821192904583b0a02c1041c2f7db is 50, key is test_row_0/C:col10/1732123466565/Put/seqid=0 2024-11-20T17:24:26,676 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741871_1047 (size=12151) 2024-11-20T17:24:26,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123526691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123526692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123526692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123526694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123526692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123526899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123526900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123526900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123526901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:26,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123526903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:26,910 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/7eefeedcba5d486dabb56e9612439365 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7eefeedcba5d486dabb56e9612439365 2024-11-20T17:24:26,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T17:24:26,914 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-20T17:24:26,918 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:26,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-20T17:24:26,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T17:24:26,922 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:26,922 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/B of 895da877845d8163116b8248e2bc3ffc into 7eefeedcba5d486dabb56e9612439365(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:26,922 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:26,922 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/B, priority=13, startTime=1732123466415; duration=0sec 2024-11-20T17:24:26,922 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:26,922 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:B 2024-11-20T17:24:26,923 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:26,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:27,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T17:24:27,077 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:27,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/ddfe821192904583b0a02c1041c2f7db 2024-11-20T17:24:27,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:24:27,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:27,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:27,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:27,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/31746a004a254e3182af394d517df7ca as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/31746a004a254e3182af394d517df7ca 2024-11-20T17:24:27,098 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/31746a004a254e3182af394d517df7ca, entries=200, sequenceid=158, filesize=14.2 K 2024-11-20T17:24:27,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/ad1e2ebc371c49e0af292510037500fb as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/ad1e2ebc371c49e0af292510037500fb 2024-11-20T17:24:27,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/ad1e2ebc371c49e0af292510037500fb, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T17:24:27,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/ddfe821192904583b0a02c1041c2f7db as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ddfe821192904583b0a02c1041c2f7db 2024-11-20T17:24:27,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ddfe821192904583b0a02c1041c2f7db, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T17:24:27,125 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 895da877845d8163116b8248e2bc3ffc in 559ms, sequenceid=158, compaction requested=false 2024-11-20T17:24:27,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:27,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:27,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:24:27,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:27,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:27,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:27,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:27,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:27,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:27,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/3439c30e0bb341d086856796fce9c823 is 50, key is test_row_0/A:col10/1732123466584/Put/seqid=0 2024-11-20T17:24:27,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T17:24:27,232 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:27,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741872_1048 (size=12151) 2024-11-20T17:24:27,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:24:27,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:27,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:27,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:27,234 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:27,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123527238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123527239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123527240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123527240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123527240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123527346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123527346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123527346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123527347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123527347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,386 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:27,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:24:27,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:27,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:27,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:27,387 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:27,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T17:24:27,540 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:27,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:24:27,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:27,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:27,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:27,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:27,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123527550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123527550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123527550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123527552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123527553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/3439c30e0bb341d086856796fce9c823 2024-11-20T17:24:27,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/ed065afd4831452e9451d3a6987fbb91 is 50, key is test_row_0/B:col10/1732123466584/Put/seqid=0 2024-11-20T17:24:27,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741873_1049 (size=12151) 2024-11-20T17:24:27,695 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:27,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:24:27,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:27,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
as already flushing 2024-11-20T17:24:27,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:27,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,849 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:27,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:24:27,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:27,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:27,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:27,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:27,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123527854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123527855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123527855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123527858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:27,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:27,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123527859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,007 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:28,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:24:28,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:28,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:28,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:28,008 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:28,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:28,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:28,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T17:24:28,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/ed065afd4831452e9451d3a6987fbb91 2024-11-20T17:24:28,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/451003116cb346568ed0deff2aeef532 is 50, key is test_row_0/C:col10/1732123466584/Put/seqid=0 2024-11-20T17:24:28,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741874_1050 (size=12151) 2024-11-20T17:24:28,104 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/451003116cb346568ed0deff2aeef532 2024-11-20T17:24:28,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/3439c30e0bb341d086856796fce9c823 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/3439c30e0bb341d086856796fce9c823 2024-11-20T17:24:28,125 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/3439c30e0bb341d086856796fce9c823, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T17:24:28,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/ed065afd4831452e9451d3a6987fbb91 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/ed065afd4831452e9451d3a6987fbb91 2024-11-20T17:24:28,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/ed065afd4831452e9451d3a6987fbb91, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T17:24:28,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/451003116cb346568ed0deff2aeef532 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/451003116cb346568ed0deff2aeef532 2024-11-20T17:24:28,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/451003116cb346568ed0deff2aeef532, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T17:24:28,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 895da877845d8163116b8248e2bc3ffc in 945ms, sequenceid=172, compaction requested=true 2024-11-20T17:24:28,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:28,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:28,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:28,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:28,152 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:28,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:28,152 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-20T17:24:28,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:28,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:28,154 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:28,154 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39051 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:28,154 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/A is initiating minor compaction (all files) 2024-11-20T17:24:28,154 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/B is initiating minor compaction (all files) 2024-11-20T17:24:28,154 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/A in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:28,154 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/B in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:28,154 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/9166646d83ff44bf804bc9a645f65564, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/31746a004a254e3182af394d517df7ca, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/3439c30e0bb341d086856796fce9c823] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=38.1 K 2024-11-20T17:24:28,154 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7eefeedcba5d486dabb56e9612439365, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/ad1e2ebc371c49e0af292510037500fb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/ed065afd4831452e9451d3a6987fbb91] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=35.8 K 2024-11-20T17:24:28,155 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 7eefeedcba5d486dabb56e9612439365, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123465578 2024-11-20T17:24:28,155 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9166646d83ff44bf804bc9a645f65564, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123465578 2024-11-20T17:24:28,155 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting ad1e2ebc371c49e0af292510037500fb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732123466247 2024-11-20T17:24:28,156 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31746a004a254e3182af394d517df7ca, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732123466242 2024-11-20T17:24:28,156 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting ed065afd4831452e9451d3a6987fbb91, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123466583 2024-11-20T17:24:28,156 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3439c30e0bb341d086856796fce9c823, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123466583 2024-11-20T17:24:28,161 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:28,162 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T17:24:28,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:28,163 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:24:28,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:28,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:28,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:28,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:28,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:28,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:28,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/874fe7ca24554e2988c36382ee8bf026 is 50, key is test_row_0/A:col10/1732123467238/Put/seqid=0 2024-11-20T17:24:28,176 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#A#compaction#37 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:28,176 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/a09855b467c34b8a8988338b4154e829 is 50, key is test_row_0/A:col10/1732123466584/Put/seqid=0 2024-11-20T17:24:28,182 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#B#compaction#38 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:28,183 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/c2479c79ff6c4247a00b2c3c3d1fe096 is 50, key is test_row_0/B:col10/1732123466584/Put/seqid=0 2024-11-20T17:24:28,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741875_1051 (size=12561) 2024-11-20T17:24:28,229 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/a09855b467c34b8a8988338b4154e829 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/a09855b467c34b8a8988338b4154e829 2024-11-20T17:24:28,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741876_1052 (size=12151) 2024-11-20T17:24:28,242 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/A of 895da877845d8163116b8248e2bc3ffc into a09855b467c34b8a8988338b4154e829(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:28,242 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:28,242 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/A, priority=13, startTime=1732123468152; duration=0sec 2024-11-20T17:24:28,242 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:28,242 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:A 2024-11-20T17:24:28,243 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:28,254 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:28,255 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/C is initiating minor compaction (all files) 2024-11-20T17:24:28,255 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/C in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:28,255 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/9c49657710234b0681b647b9d7a91fb4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ddfe821192904583b0a02c1041c2f7db, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/451003116cb346568ed0deff2aeef532] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=35.8 K 2024-11-20T17:24:28,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741877_1053 (size=12561) 2024-11-20T17:24:28,256 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c49657710234b0681b647b9d7a91fb4, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123465578 2024-11-20T17:24:28,257 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting ddfe821192904583b0a02c1041c2f7db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732123466247 2024-11-20T17:24:28,258 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 451003116cb346568ed0deff2aeef532, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123466583 2024-11-20T17:24:28,271 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#C#compaction#39 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:28,272 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/7661e54223de49c99b0951ec4de092ee is 50, key is test_row_0/C:col10/1732123466584/Put/seqid=0 2024-11-20T17:24:28,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741878_1054 (size=12561) 2024-11-20T17:24:28,306 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/7661e54223de49c99b0951ec4de092ee as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7661e54223de49c99b0951ec4de092ee 2024-11-20T17:24:28,321 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/C of 895da877845d8163116b8248e2bc3ffc into 7661e54223de49c99b0951ec4de092ee(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:28,321 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:28,321 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/C, priority=13, startTime=1732123468152; duration=0sec 2024-11-20T17:24:28,323 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:28,323 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:C 2024-11-20T17:24:28,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:28,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:28,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123528372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123528373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123528374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123528376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123528376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,480 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123528478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123528478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123528479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123528482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123528482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,643 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/874fe7ca24554e2988c36382ee8bf026 2024-11-20T17:24:28,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/2ec031c2b15d422faf9ee7f1eabd022c is 50, key is test_row_0/B:col10/1732123467238/Put/seqid=0 2024-11-20T17:24:28,664 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/c2479c79ff6c4247a00b2c3c3d1fe096 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c2479c79ff6c4247a00b2c3c3d1fe096 2024-11-20T17:24:28,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741879_1055 (size=12151) 2024-11-20T17:24:28,677 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/2ec031c2b15d422faf9ee7f1eabd022c 2024-11-20T17:24:28,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123528683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,685 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/B of 895da877845d8163116b8248e2bc3ffc into c2479c79ff6c4247a00b2c3c3d1fe096(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:28,685 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:28,685 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/B, priority=13, startTime=1732123468152; duration=0sec 2024-11-20T17:24:28,685 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:28,685 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:B 2024-11-20T17:24:28,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123528683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123528684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123528685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:28,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123528685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:28,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/a01614e377e244a5ae8ecbe26b6da7b1 is 50, key is test_row_0/C:col10/1732123467238/Put/seqid=0 2024-11-20T17:24:28,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741880_1056 (size=12151) 2024-11-20T17:24:28,718 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/a01614e377e244a5ae8ecbe26b6da7b1 2024-11-20T17:24:28,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/874fe7ca24554e2988c36382ee8bf026 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/874fe7ca24554e2988c36382ee8bf026 2024-11-20T17:24:28,739 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/874fe7ca24554e2988c36382ee8bf026, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T17:24:28,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/2ec031c2b15d422faf9ee7f1eabd022c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2ec031c2b15d422faf9ee7f1eabd022c 2024-11-20T17:24:28,749 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 
{event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2ec031c2b15d422faf9ee7f1eabd022c, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T17:24:28,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/a01614e377e244a5ae8ecbe26b6da7b1 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a01614e377e244a5ae8ecbe26b6da7b1 2024-11-20T17:24:28,762 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a01614e377e244a5ae8ecbe26b6da7b1, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T17:24:28,763 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 895da877845d8163116b8248e2bc3ffc in 601ms, sequenceid=196, compaction requested=false 2024-11-20T17:24:28,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:28,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:28,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-20T17:24:28,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-20T17:24:28,768 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-20T17:24:28,768 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8420 sec 2024-11-20T17:24:28,771 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.8510 sec 2024-11-20T17:24:28,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:28,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:24:28,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:28,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:28,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:28,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:28,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:28,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:29,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/bb7b06da475a49a2813626cde68abc3f is 50, key is test_row_0/A:col10/1732123468991/Put/seqid=0 2024-11-20T17:24:29,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123529018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123529020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123529021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123529022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741881_1057 (size=16931) 2024-11-20T17:24:29,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123529024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T17:24:29,030 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-20T17:24:29,033 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:29,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-20T17:24:29,035 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:29,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T17:24:29,036 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:29,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:29,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123529126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123529127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123529127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123529127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123529128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T17:24:29,188 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:29,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T17:24:29,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:29,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:29,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:29,190 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:29,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:29,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:29,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123529329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123529329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123529331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123529332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123529333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T17:24:29,344 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:29,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T17:24:29,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:29,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:29,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:29,345 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
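The RegionTooBusyException entries above are the region server pushing back on writers while the region's memstore is over its 512 K blocking limit. As a rough illustration only, here is a minimal client-side sketch of how a writer could back off and retry such a put. It assumes the standard HBase client API; with default settings the client already retries this exception internally (it normally surfaces only after those retries are exhausted), so the explicit loop and the backoff values are purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical retry wrapper around Table.put(), reacting to the
// RegionTooBusyException seen in the log above. Illustrative only.
public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                 // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                   // may be rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // The region server is asking writers to slow down while it flushes/compacts.
          Thread.sleep(backoffMs);
          backoffMs *= 2;                   // exponential backoff, bounded by the loop
        }
      }
    }
  }
}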
2024-11-20T17:24:29,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:29,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:29,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/bb7b06da475a49a2813626cde68abc3f 2024-11-20T17:24:29,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/c6d393fa528a416d96833df5163fb87e is 50, key is test_row_0/B:col10/1732123468991/Put/seqid=0 2024-11-20T17:24:29,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741882_1058 (size=12151) 2024-11-20T17:24:29,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/c6d393fa528a416d96833df5163fb87e 2024-11-20T17:24:29,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/169bf1cc0548435b9f265dc5402dc554 is 50, key is test_row_0/C:col10/1732123468991/Put/seqid=0 2024-11-20T17:24:29,498 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:29,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T17:24:29,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:29,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:29,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:29,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:29,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:29,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:29,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741883_1059 (size=12151) 2024-11-20T17:24:29,503 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/169bf1cc0548435b9f265dc5402dc554 2024-11-20T17:24:29,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/bb7b06da475a49a2813626cde68abc3f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/bb7b06da475a49a2813626cde68abc3f 2024-11-20T17:24:29,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/bb7b06da475a49a2813626cde68abc3f, entries=250, sequenceid=214, filesize=16.5 K 2024-11-20T17:24:29,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/c6d393fa528a416d96833df5163fb87e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c6d393fa528a416d96833df5163fb87e 2024-11-20T17:24:29,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c6d393fa528a416d96833df5163fb87e, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T17:24:29,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/169bf1cc0548435b9f265dc5402dc554 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/169bf1cc0548435b9f265dc5402dc554 2024-11-20T17:24:29,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/169bf1cc0548435b9f265dc5402dc554, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T17:24:29,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 895da877845d8163116b8248e2bc3ffc in 567ms, sequenceid=214, compaction requested=true 2024-11-20T17:24:29,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:29,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:29,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:29,560 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:29,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:29,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:29,560 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:29,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:29,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:29,561 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:29,561 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:29,562 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/A is initiating minor compaction (all files) 2024-11-20T17:24:29,562 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/B is initiating minor compaction (all files) 2024-11-20T17:24:29,562 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/A in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:29,562 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/B in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:29,562 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/a09855b467c34b8a8988338b4154e829, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/874fe7ca24554e2988c36382ee8bf026, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/bb7b06da475a49a2813626cde68abc3f] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=40.7 K 2024-11-20T17:24:29,562 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c2479c79ff6c4247a00b2c3c3d1fe096, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2ec031c2b15d422faf9ee7f1eabd022c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c6d393fa528a416d96833df5163fb87e] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=36.0 K 2024-11-20T17:24:29,562 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting c2479c79ff6c4247a00b2c3c3d1fe096, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123466583 2024-11-20T17:24:29,562 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting a09855b467c34b8a8988338b4154e829, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123466583 2024-11-20T17:24:29,563 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ec031c2b15d422faf9ee7f1eabd022c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732123467235 2024-11-20T17:24:29,563 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting 874fe7ca24554e2988c36382ee8bf026, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732123467235 2024-11-20T17:24:29,564 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting c6d393fa528a416d96833df5163fb87e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732123468372 2024-11-20T17:24:29,564 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb7b06da475a49a2813626cde68abc3f, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732123468372 2024-11-20T17:24:29,580 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#A#compaction#45 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:29,581 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/7da581a06b694ba69b59237fa8553f83 is 50, key is test_row_0/A:col10/1732123468991/Put/seqid=0 2024-11-20T17:24:29,586 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#B#compaction#46 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:29,587 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/bef7de201b30464a88ffa2c06b0ea387 is 50, key is test_row_0/B:col10/1732123468991/Put/seqid=0 2024-11-20T17:24:29,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741884_1060 (size=12663) 2024-11-20T17:24:29,619 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/7da581a06b694ba69b59237fa8553f83 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/7da581a06b694ba69b59237fa8553f83 2024-11-20T17:24:29,630 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/A of 895da877845d8163116b8248e2bc3ffc into 7da581a06b694ba69b59237fa8553f83(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
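The ExploringCompactionPolicy entries above ("selected 3 files of size 41643 ... with 1 in ratio") refer to the size-ratio test applied to each candidate permutation of store files. Below is a simplified, self-contained sketch of that test, assuming "in ratio" means no file is larger than the configured hbase.hstore.compaction.ratio (1.2 by default) times the combined size of the other candidates; the file sizes used are approximations of the three A-store files from the log, not exact values, and this is not the exact HBase code path.

import java.util.List;

// Simplified illustration of the "files in ratio" test used by ratio-based
// compaction selection. Sizes are in bytes; the ratio mirrors the default
// hbase.hstore.compaction.ratio of 1.2. Assumption: "in ratio" means each
// file is no larger than ratio * (sum of the other candidates).
public class CompactionRatioCheck {
  static boolean filesInRatio(List<Long> candidateSizes, double ratio) {
    long total = candidateSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : candidateSizes) {
      if (size > (total - size) * ratio) {
        return false;  // one file dominates the selection; skip this permutation
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximately the three A-store files from the log (~12.3 K, ~11.9 K, ~16.5 K).
    List<Long> sizes = List.of(12_600L, 12_200L, 16_900L);
    System.out.println(filesInRatio(sizes, 1.2));  // true: no single file dominates
  }
}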
2024-11-20T17:24:29,630 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:29,630 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/A, priority=13, startTime=1732123469559; duration=0sec 2024-11-20T17:24:29,631 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:29,631 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:A 2024-11-20T17:24:29,631 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:29,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741885_1061 (size=12663) 2024-11-20T17:24:29,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T17:24:29,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:29,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T17:24:29,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:29,640 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:29,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:29,640 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/C is initiating minor compaction (all files) 2024-11-20T17:24:29,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:29,640 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/C in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:29,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:29,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:29,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:29,640 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7661e54223de49c99b0951ec4de092ee, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a01614e377e244a5ae8ecbe26b6da7b1, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/169bf1cc0548435b9f265dc5402dc554] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=36.0 K 2024-11-20T17:24:29,642 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7661e54223de49c99b0951ec4de092ee, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123466583 2024-11-20T17:24:29,643 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting a01614e377e244a5ae8ecbe26b6da7b1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732123467235 2024-11-20T17:24:29,645 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 169bf1cc0548435b9f265dc5402dc554, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732123468372 2024-11-20T17:24:29,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/337cb931df7f41a4a9263401d61fd3e5 is 50, key is test_row_0/A:col10/1732123469021/Put/seqid=0 2024-11-20T17:24:29,654 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:29,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T17:24:29,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:29,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
as already flushing 2024-11-20T17:24:29,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:29,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:29,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
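The repeated "Unable to complete flush" errors for pid=19 come from a master-driven flush procedure whose FlushRegionCallable is rejected because the region reports "already flushing", so the master keeps re-dispatching it until the in-progress flush completes. For context, a hypothetical sketch of how such a flush is typically requested through the Admin API; the table name matches the test table in the log, but whether the test issues the flush exactly this way is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical trigger for a flush like the procedure (pid=19) retried above.
// The master drives the flush; a region that is already flushing rejects the
// remote callable, which is the failure pattern visible in this log.
public class FlushRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));  // asks for a flush of every region of the table
    }
  }
}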
2024-11-20T17:24:29,656 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/bef7de201b30464a88ffa2c06b0ea387 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/bef7de201b30464a88ffa2c06b0ea387 2024-11-20T17:24:29,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:29,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741886_1062 (size=14541) 2024-11-20T17:24:29,667 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#C#compaction#48 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:29,668 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/613e3be5892f43e7850614a6a2adda20 is 50, key is test_row_0/C:col10/1732123468991/Put/seqid=0 2024-11-20T17:24:29,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/337cb931df7f41a4a9263401d61fd3e5 2024-11-20T17:24:29,668 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/B of 895da877845d8163116b8248e2bc3ffc into bef7de201b30464a88ffa2c06b0ea387(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:29,668 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:29,668 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/B, priority=13, startTime=1732123469560; duration=0sec 2024-11-20T17:24:29,669 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:29,669 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:B 2024-11-20T17:24:29,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123529669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123529670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123529672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123529673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123529674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/9dba84434c364406ac32986e5350efae is 50, key is test_row_0/B:col10/1732123469021/Put/seqid=0 2024-11-20T17:24:29,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741887_1063 (size=12663) 2024-11-20T17:24:29,706 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/613e3be5892f43e7850614a6a2adda20 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/613e3be5892f43e7850614a6a2adda20 2024-11-20T17:24:29,715 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/C of 895da877845d8163116b8248e2bc3ffc into 613e3be5892f43e7850614a6a2adda20(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:29,716 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:29,716 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/C, priority=13, startTime=1732123469560; duration=0sec 2024-11-20T17:24:29,716 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:29,716 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:C 2024-11-20T17:24:29,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741888_1064 (size=12151) 2024-11-20T17:24:29,727 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/9dba84434c364406ac32986e5350efae 2024-11-20T17:24:29,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/269093d9515643b594b097a6a8de4b35 is 50, key is test_row_0/C:col10/1732123469021/Put/seqid=0 2024-11-20T17:24:29,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741889_1065 (size=12151) 2024-11-20T17:24:29,753 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/269093d9515643b594b097a6a8de4b35 2024-11-20T17:24:29,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/337cb931df7f41a4a9263401d61fd3e5 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/337cb931df7f41a4a9263401d61fd3e5 2024-11-20T17:24:29,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/337cb931df7f41a4a9263401d61fd3e5, entries=200, sequenceid=237, filesize=14.2 K 2024-11-20T17:24:29,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/9dba84434c364406ac32986e5350efae as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/9dba84434c364406ac32986e5350efae 2024-11-20T17:24:29,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123529779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123529781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123529782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123529780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:29,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123529782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:29,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/9dba84434c364406ac32986e5350efae, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T17:24:29,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/269093d9515643b594b097a6a8de4b35 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/269093d9515643b594b097a6a8de4b35 2024-11-20T17:24:29,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/269093d9515643b594b097a6a8de4b35, entries=150, sequenceid=237, filesize=11.9 K 2024-11-20T17:24:29,800 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=87.22 KB/89310 for 895da877845d8163116b8248e2bc3ffc in 160ms, sequenceid=237, compaction requested=false 2024-11-20T17:24:29,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:29,808 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:29,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T17:24:29,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:29,809 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T17:24:29,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:29,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:29,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:29,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:29,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:29,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:29,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/f0d9a81294c64a748f14461eac0dd78b is 50, key is test_row_0/A:col10/1732123469669/Put/seqid=0 2024-11-20T17:24:29,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741890_1066 (size=12151) 2024-11-20T17:24:29,833 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/f0d9a81294c64a748f14461eac0dd78b 2024-11-20T17:24:29,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/1b2b0627e28a4d9aa673cc5f8c26c6c2 is 50, key is test_row_0/B:col10/1732123469669/Put/seqid=0 2024-11-20T17:24:29,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741891_1067 (size=12151) 2024-11-20T17:24:29,867 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=255 (bloomFilter=true), 
to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/1b2b0627e28a4d9aa673cc5f8c26c6c2 2024-11-20T17:24:29,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/20ab1351e8ac4f54b61a013ae6d1ba1c is 50, key is test_row_0/C:col10/1732123469669/Put/seqid=0 2024-11-20T17:24:29,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741892_1068 (size=12151) 2024-11-20T17:24:29,899 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/20ab1351e8ac4f54b61a013ae6d1ba1c 2024-11-20T17:24:29,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/f0d9a81294c64a748f14461eac0dd78b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f0d9a81294c64a748f14461eac0dd78b 2024-11-20T17:24:29,918 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f0d9a81294c64a748f14461eac0dd78b, entries=150, sequenceid=255, filesize=11.9 K 2024-11-20T17:24:29,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/1b2b0627e28a4d9aa673cc5f8c26c6c2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/1b2b0627e28a4d9aa673cc5f8c26c6c2 2024-11-20T17:24:29,928 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/1b2b0627e28a4d9aa673cc5f8c26c6c2, entries=150, sequenceid=255, filesize=11.9 K 2024-11-20T17:24:29,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/20ab1351e8ac4f54b61a013ae6d1ba1c as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/20ab1351e8ac4f54b61a013ae6d1ba1c 2024-11-20T17:24:29,936 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/20ab1351e8ac4f54b61a013ae6d1ba1c, entries=150, sequenceid=255, filesize=11.9 K 2024-11-20T17:24:29,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-20T17:24:29,938 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=0 B/0 for 895da877845d8163116b8248e2bc3ffc in 129ms, sequenceid=255, compaction requested=true 2024-11-20T17:24:29,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:29,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:29,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-20T17:24:29,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-20T17:24:29,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-20T17:24:29,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 905 msec 2024-11-20T17:24:29,946 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 911 msec 2024-11-20T17:24:29,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:29,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:24:29,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:29,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:29,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:29,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:29,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:29,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:30,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/2ec5b4080e384eaeb6ed8bfd72a750e0 is 50, key is test_row_0/A:col10/1732123469993/Put/seqid=0 2024-11-20T17:24:30,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123530022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123530024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123530025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123530027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123530028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741893_1069 (size=12301) 2024-11-20T17:24:30,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/2ec5b4080e384eaeb6ed8bfd72a750e0 2024-11-20T17:24:30,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/2b27402dc7694dd48cabbbc5591f3519 is 50, key is test_row_0/B:col10/1732123469993/Put/seqid=0 2024-11-20T17:24:30,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741894_1070 (size=12301) 2024-11-20T17:24:30,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123530130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123530131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123530132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123530132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123530132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T17:24:30,140 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-20T17:24:30,142 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:30,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-20T17:24:30,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T17:24:30,144 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:30,144 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:30,145 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:30,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=20 2024-11-20T17:24:30,297 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:30,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T17:24:30,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:30,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:30,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:30,298 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:30,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:30,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:30,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123530335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123530336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123530337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123530337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123530339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T17:24:30,451 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:30,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T17:24:30,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:30,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:30,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:30,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:30,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:30,461 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/2b27402dc7694dd48cabbbc5591f3519 2024-11-20T17:24:30,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/1550d6a42213409e82c2f511f59dc9ee is 50, key is test_row_0/C:col10/1732123469993/Put/seqid=0 2024-11-20T17:24:30,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741895_1071 (size=12301) 2024-11-20T17:24:30,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/1550d6a42213409e82c2f511f59dc9ee 2024-11-20T17:24:30,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/2ec5b4080e384eaeb6ed8bfd72a750e0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/2ec5b4080e384eaeb6ed8bfd72a750e0 2024-11-20T17:24:30,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/2ec5b4080e384eaeb6ed8bfd72a750e0, entries=150, sequenceid=268, filesize=12.0 K 2024-11-20T17:24:30,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/2b27402dc7694dd48cabbbc5591f3519 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2b27402dc7694dd48cabbbc5591f3519 2024-11-20T17:24:30,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2b27402dc7694dd48cabbbc5591f3519, entries=150, sequenceid=268, filesize=12.0 K 2024-11-20T17:24:30,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/1550d6a42213409e82c2f511f59dc9ee as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/1550d6a42213409e82c2f511f59dc9ee 2024-11-20T17:24:30,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/1550d6a42213409e82c2f511f59dc9ee, entries=150, sequenceid=268, filesize=12.0 K 2024-11-20T17:24:30,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 895da877845d8163116b8248e2bc3ffc in 538ms, sequenceid=268, compaction requested=true 2024-11-20T17:24:30,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:30,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:30,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:30,536 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:24:30,536 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:24:30,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:30,538 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51656 starting at candidate #0 after considering 3 permutations with 3 in ratio 
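The repeated RegionTooBusyException entries above report mutate RPCs being rejected once this region's memstore passed the 512.0 K blocking limit, and the flush that follows is what drains it again. As far as I understand HBase's checkResources path, that blocking size is the configured flush size multiplied by the memstore block multiplier. The sketch below only illustrates that relationship; the config keys are the standard ones, but the numeric values are hypothetical and merely chosen so the product comes out at 512 K like the limit in this log, not values taken from the test itself.

    // Sketch only, assuming standard HBase config keys; values are hypothetical.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // hypothetical 128 K
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // hypothetical
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        // Writes to a region are rejected with RegionTooBusyException once its
        // memstore exceeds roughly this many bytes, until a flush catches up.
        System.out.println("blocking memstore size ~= " + blocking + " bytes"); // 524288 = 512 K
      }
    }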
2024-11-20T17:24:30,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:30,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:30,538 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/A is initiating minor compaction (all files) 2024-11-20T17:24:30,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:30,538 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/A in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:30,538 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/7da581a06b694ba69b59237fa8553f83, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/337cb931df7f41a4a9263401d61fd3e5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f0d9a81294c64a748f14461eac0dd78b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/2ec5b4080e384eaeb6ed8bfd72a750e0] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=50.4 K 2024-11-20T17:24:30,538 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:24:30,538 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/B is initiating minor compaction (all files) 2024-11-20T17:24:30,538 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/B in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:30,539 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/bef7de201b30464a88ffa2c06b0ea387, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/9dba84434c364406ac32986e5350efae, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/1b2b0627e28a4d9aa673cc5f8c26c6c2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2b27402dc7694dd48cabbbc5591f3519] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=48.1 K 2024-11-20T17:24:30,539 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7da581a06b694ba69b59237fa8553f83, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732123468372 2024-11-20T17:24:30,539 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting bef7de201b30464a88ffa2c06b0ea387, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732123468372 2024-11-20T17:24:30,539 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 337cb931df7f41a4a9263401d61fd3e5, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732123469018 2024-11-20T17:24:30,540 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 9dba84434c364406ac32986e5350efae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732123469020 2024-11-20T17:24:30,540 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0d9a81294c64a748f14461eac0dd78b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732123469669 2024-11-20T17:24:30,540 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b2b0627e28a4d9aa673cc5f8c26c6c2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732123469669 2024-11-20T17:24:30,541 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ec5b4080e384eaeb6ed8bfd72a750e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732123469988 2024-11-20T17:24:30,541 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b27402dc7694dd48cabbbc5591f3519, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732123469988 2024-11-20T17:24:30,567 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#B#compaction#57 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:30,568 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/468413d9cbc8434fa3c847b3fcd727bd is 50, key is test_row_0/B:col10/1732123469993/Put/seqid=0 2024-11-20T17:24:30,570 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#A#compaction#58 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:30,571 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/d939a972f016433a8a0fbf7cd735b3f6 is 50, key is test_row_0/A:col10/1732123469993/Put/seqid=0 2024-11-20T17:24:30,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741896_1072 (size=12949) 2024-11-20T17:24:30,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741897_1073 (size=12949) 2024-11-20T17:24:30,597 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/468413d9cbc8434fa3c847b3fcd727bd as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/468413d9cbc8434fa3c847b3fcd727bd 2024-11-20T17:24:30,599 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/d939a972f016433a8a0fbf7cd735b3f6 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d939a972f016433a8a0fbf7cd735b3f6 2024-11-20T17:24:30,607 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/B of 895da877845d8163116b8248e2bc3ffc into 468413d9cbc8434fa3c847b3fcd727bd(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
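The "Exploring compaction algorithm has selected 4 files ... after considering 3 permutations with 3 in ratio" entries above come from HBase's exploring compaction policy. To my understanding, a permutation is "in ratio" when no file in it is much larger than the combined size of the other files in that selection. The sketch below shows only that ratio test in isolation; the helper name is hypothetical (not HBase code), the 1.2 ratio is an assumed default-style value, and the file sizes are loosely modelled on the ~12 K store files in this log.

    import java.util.List;

    public class RatioCheckSketch {
      // Hypothetical helper, not HBase code: true if 'size' is within 'ratio'
      // times the combined size of the other files in the proposed selection.
      static boolean inRatio(long size, List<Long> otherSizes, double ratio) {
        long sumOfOthers = otherSizes.stream().mapToLong(Long::longValue).sum();
        return size <= sumOfOthers * ratio;
      }

      public static void main(String[] args) {
        double ratio = 1.2; // assumed, in the spirit of hbase.hstore.compaction.ratio
        System.out.println(inRatio(12_700, List.of(14_500L, 12_200L, 12_300L), ratio)); // true
      }
    }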
2024-11-20T17:24:30,607 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:30,608 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/B, priority=12, startTime=1732123470536; duration=0sec 2024-11-20T17:24:30,608 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:30,608 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:30,608 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:B 2024-11-20T17:24:30,608 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:24:30,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T17:24:30,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:30,609 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:24:30,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:30,611 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:24:30,611 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/C is initiating minor compaction (all files) 2024-11-20T17:24:30,611 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/C in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:30,611 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/A of 895da877845d8163116b8248e2bc3ffc into d939a972f016433a8a0fbf7cd735b3f6(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:30,611 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/613e3be5892f43e7850614a6a2adda20, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/269093d9515643b594b097a6a8de4b35, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/20ab1351e8ac4f54b61a013ae6d1ba1c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/1550d6a42213409e82c2f511f59dc9ee] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=48.1 K 2024-11-20T17:24:30,611 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:30,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:30,611 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/A, priority=12, startTime=1732123470535; duration=0sec 2024-11-20T17:24:30,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:30,612 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:30,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:30,612 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:A 2024-11-20T17:24:30,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:30,612 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 613e3be5892f43e7850614a6a2adda20, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732123468372 2024-11-20T17:24:30,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:30,612 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 269093d9515643b594b097a6a8de4b35, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732123469020 
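On the client side of this test, each "callId ... exception=org.apache.hadoop.hbase.RegionTooBusyException" entry is a mutate RPC the server rejected while the memstore was over its blocking limit. As far as I know the standard HBase client treats RegionTooBusyException as retriable and pauses and re-sends the same put on its own, so callers usually need no extra handling. The sketch below is an illustrative plain put using the table, row, column family, and qualifier names visible in this log; the retry and pause tuning values are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TooBusyPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 15); // hypothetical tuning
        conf.setLong("hbase.client.pause", 100);        // hypothetical base backoff, ms
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // If the region answers with RegionTooBusyException, the client's RPC
          // retrying machinery backs off and re-sends until it succeeds or retries run out.
          table.put(put);
        }
      }
    }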
2024-11-20T17:24:30,613 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 20ab1351e8ac4f54b61a013ae6d1ba1c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732123469669 2024-11-20T17:24:30,614 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 1550d6a42213409e82c2f511f59dc9ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732123469988 2024-11-20T17:24:30,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/b5618eafa8f94188bbefb5549656d7f6 is 50, key is test_row_0/A:col10/1732123470026/Put/seqid=0 2024-11-20T17:24:30,645 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#C#compaction#60 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:30,646 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/6b5002b6d4e648c99d70eb0f401b9fe8 is 50, key is test_row_0/C:col10/1732123469993/Put/seqid=0 2024-11-20T17:24:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:30,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:30,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123530657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123530657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123530660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123530663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741898_1074 (size=12301) 2024-11-20T17:24:30,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123530664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741899_1075 (size=12949) 2024-11-20T17:24:30,693 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/6b5002b6d4e648c99d70eb0f401b9fe8 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/6b5002b6d4e648c99d70eb0f401b9fe8 2024-11-20T17:24:30,706 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/C of 895da877845d8163116b8248e2bc3ffc into 6b5002b6d4e648c99d70eb0f401b9fe8(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:30,707 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:30,707 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/C, priority=12, startTime=1732123470538; duration=0sec 2024-11-20T17:24:30,707 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:30,707 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:C 2024-11-20T17:24:30,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T17:24:30,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123530765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123530766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123530768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123530769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123530771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123530969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123530969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123530971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123530973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:30,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:30,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123530973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:31,069 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/b5618eafa8f94188bbefb5549656d7f6 2024-11-20T17:24:31,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/88493592d3114c87a88145afa2ff6ede is 50, key is test_row_0/B:col10/1732123470026/Put/seqid=0 2024-11-20T17:24:31,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741900_1076 (size=12301) 2024-11-20T17:24:31,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T17:24:31,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:31,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123531273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:31,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:31,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123531273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:31,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:31,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123531275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:31,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:31,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123531276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:31,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:31,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123531278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:31,496 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/88493592d3114c87a88145afa2ff6ede 2024-11-20T17:24:31,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/fd5dee64f3cb478fbdec8a53c66641a3 is 50, key is test_row_0/C:col10/1732123470026/Put/seqid=0 2024-11-20T17:24:31,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741901_1077 (size=12301) 2024-11-20T17:24:31,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:31,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123531776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:31,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:31,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123531777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:31,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:31,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123531781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:31,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:31,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123531782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:31,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:31,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123531784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:31,928 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/fd5dee64f3cb478fbdec8a53c66641a3 2024-11-20T17:24:31,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/b5618eafa8f94188bbefb5549656d7f6 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/b5618eafa8f94188bbefb5549656d7f6 2024-11-20T17:24:31,947 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/b5618eafa8f94188bbefb5549656d7f6, entries=150, sequenceid=293, filesize=12.0 K 2024-11-20T17:24:31,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/88493592d3114c87a88145afa2ff6ede as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/88493592d3114c87a88145afa2ff6ede 2024-11-20T17:24:31,964 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/88493592d3114c87a88145afa2ff6ede, entries=150, sequenceid=293, filesize=12.0 K 2024-11-20T17:24:31,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/fd5dee64f3cb478fbdec8a53c66641a3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/fd5dee64f3cb478fbdec8a53c66641a3 2024-11-20T17:24:31,975 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/fd5dee64f3cb478fbdec8a53c66641a3, entries=150, sequenceid=293, filesize=12.0 K 2024-11-20T17:24:31,976 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 895da877845d8163116b8248e2bc3ffc in 1367ms, sequenceid=293, compaction requested=false 2024-11-20T17:24:31,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:31,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:31,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-20T17:24:31,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-20T17:24:31,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-20T17:24:31,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8330 sec 2024-11-20T17:24:31,983 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.8390 sec 2024-11-20T17:24:32,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T17:24:32,249 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-20T17:24:32,255 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:32,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-20T17:24:32,258 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:32,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T17:24:32,259 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:32,259 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:32,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T17:24:32,412 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:32,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T17:24:32,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:32,413 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:24:32,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:32,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:32,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:32,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:32,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:32,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:32,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/38360526ace144eb86f4a40a86250eb5 is 50, key is test_row_0/A:col10/1732123470652/Put/seqid=0 2024-11-20T17:24:32,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741902_1078 (size=12301) 2024-11-20T17:24:32,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T17:24:32,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:32,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:32,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:32,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123532803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:32,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:32,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123532805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:32,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:32,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123532805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:32,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:32,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123532807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:32,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:32,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123532807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:32,839 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/38360526ace144eb86f4a40a86250eb5 2024-11-20T17:24:32,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/e3fb7dd468604fad93769fd545250106 is 50, key is test_row_0/B:col10/1732123470652/Put/seqid=0 2024-11-20T17:24:32,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T17:24:32,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741903_1079 (size=12301) 2024-11-20T17:24:32,863 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/e3fb7dd468604fad93769fd545250106 2024-11-20T17:24:32,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/de21175dbb7547d2b576931f4af72c94 is 50, key is test_row_0/C:col10/1732123470652/Put/seqid=0 2024-11-20T17:24:32,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741904_1080 (size=12301) 2024-11-20T17:24:32,887 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/de21175dbb7547d2b576931f4af72c94 2024-11-20T17:24:32,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/38360526ace144eb86f4a40a86250eb5 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/38360526ace144eb86f4a40a86250eb5 2024-11-20T17:24:32,901 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/38360526ace144eb86f4a40a86250eb5, entries=150, sequenceid=308, filesize=12.0 K 2024-11-20T17:24:32,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/e3fb7dd468604fad93769fd545250106 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e3fb7dd468604fad93769fd545250106 2024-11-20T17:24:32,909 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e3fb7dd468604fad93769fd545250106, entries=150, sequenceid=308, filesize=12.0 K 2024-11-20T17:24:32,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/de21175dbb7547d2b576931f4af72c94 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/de21175dbb7547d2b576931f4af72c94 2024-11-20T17:24:32,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:32,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:32,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123532909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:32,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123532908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:32,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:32,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123532909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:32,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:32,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123532912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:32,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:32,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123532914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:32,919 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/de21175dbb7547d2b576931f4af72c94, entries=150, sequenceid=308, filesize=12.0 K 2024-11-20T17:24:32,920 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 895da877845d8163116b8248e2bc3ffc in 507ms, sequenceid=308, compaction requested=true 2024-11-20T17:24:32,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:32,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:32,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23
2024-11-20T17:24:32,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=23
2024-11-20T17:24:32,926 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22
2024-11-20T17:24:32,926 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 664 msec
2024-11-20T17:24:32,928 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 672 msec
2024-11-20T17:24:33,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-20T17:24:33,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A
2024-11-20T17:24:33,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc
2024-11-20T17:24:33,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:24:33,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B
2024-11-20T17:24:33,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:24:33,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C
2024-11-20T17:24:33,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:24:33,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/d1c75eb0303444a594aa7160e8ffbe3f is 50, key is test_row_0/A:col10/1732123473116/Put/seqid=0
2024-11-20T17:24:33,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123533133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123533134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123533135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123533135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741905_1081 (size=12301) 2024-11-20T17:24:33,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123533139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123533241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123533241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123533243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123533243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123533245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T17:24:33,362 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-20T17:24:33,364 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:33,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-20T17:24:33,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T17:24:33,366 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:33,367 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:33,367 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:33,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123533443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123533443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123533446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123533447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123533447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T17:24:33,519 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:33,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T17:24:33,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:33,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:33,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:33,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:33,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:33,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:33,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/d1c75eb0303444a594aa7160e8ffbe3f 2024-11-20T17:24:33,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/530984e7b59a4745a928867dfe3f4247 is 50, key is test_row_0/B:col10/1732123473116/Put/seqid=0 2024-11-20T17:24:33,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741906_1082 (size=12301) 2024-11-20T17:24:33,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/530984e7b59a4745a928867dfe3f4247 2024-11-20T17:24:33,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/4a80f10cd0f44a5ea9e3e2668d68f80a is 50, key is test_row_0/C:col10/1732123473116/Put/seqid=0 2024-11-20T17:24:33,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741907_1083 (size=12301) 2024-11-20T17:24:33,594 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=333 (bloomFilter=true), 
to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/4a80f10cd0f44a5ea9e3e2668d68f80a 2024-11-20T17:24:33,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/d1c75eb0303444a594aa7160e8ffbe3f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d1c75eb0303444a594aa7160e8ffbe3f 2024-11-20T17:24:33,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d1c75eb0303444a594aa7160e8ffbe3f, entries=150, sequenceid=333, filesize=12.0 K 2024-11-20T17:24:33,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/530984e7b59a4745a928867dfe3f4247 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/530984e7b59a4745a928867dfe3f4247 2024-11-20T17:24:33,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/530984e7b59a4745a928867dfe3f4247, entries=150, sequenceid=333, filesize=12.0 K 2024-11-20T17:24:33,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/4a80f10cd0f44a5ea9e3e2668d68f80a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/4a80f10cd0f44a5ea9e3e2668d68f80a 2024-11-20T17:24:33,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/4a80f10cd0f44a5ea9e3e2668d68f80a, entries=150, sequenceid=333, filesize=12.0 K 2024-11-20T17:24:33,635 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 895da877845d8163116b8248e2bc3ffc in 511ms, sequenceid=333, compaction requested=true 2024-11-20T17:24:33,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:33,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:33,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:33,635 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-20T17:24:33,635 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-20T17:24:33,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:B, priority=-2147483648, current under compaction store size is 2
2024-11-20T17:24:33,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T17:24:33,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:C, priority=-2147483648, current under compaction store size is 3
2024-11-20T17:24:33,635 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T17:24:33,637 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-20T17:24:33,637 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/B is initiating minor compaction (all files)
2024-11-20T17:24:33,637 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/B in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.
2024-11-20T17:24:33,637 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/468413d9cbc8434fa3c847b3fcd727bd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/88493592d3114c87a88145afa2ff6ede, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e3fb7dd468604fad93769fd545250106, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/530984e7b59a4745a928867dfe3f4247] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=48.7 K 2024-11-20T17:24:33,638 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:24:33,638 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/A is initiating minor compaction (all files) 2024-11-20T17:24:33,638 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/A in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:33,638 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d939a972f016433a8a0fbf7cd735b3f6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/b5618eafa8f94188bbefb5549656d7f6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/38360526ace144eb86f4a40a86250eb5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d1c75eb0303444a594aa7160e8ffbe3f] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=48.7 K 2024-11-20T17:24:33,638 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 468413d9cbc8434fa3c847b3fcd727bd, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732123469988 2024-11-20T17:24:33,639 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting d939a972f016433a8a0fbf7cd735b3f6, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732123469988 2024-11-20T17:24:33,639 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 88493592d3114c87a88145afa2ff6ede, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, 
earliestPutTs=1732123470018
2024-11-20T17:24:33,640 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5618eafa8f94188bbefb5549656d7f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732123470018
2024-11-20T17:24:33,644 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38360526ace144eb86f4a40a86250eb5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1732123470652
2024-11-20T17:24:33,644 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting e3fb7dd468604fad93769fd545250106, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1732123470652
2024-11-20T17:24:33,644 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 530984e7b59a4745a928867dfe3f4247, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732123473116
2024-11-20T17:24:33,644 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1c75eb0303444a594aa7160e8ffbe3f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732123473116
2024-11-20T17:24:33,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24
2024-11-20T17:24:33,670 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#A#compaction#69 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T17:24:33,671 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/adc42c77124a43c48d835e30358dbe43 is 50, key is test_row_0/A:col10/1732123473116/Put/seqid=0
2024-11-20T17:24:33,673 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293
2024-11-20T17:24:33,674 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#B#compaction#70 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T17:24:33,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25
2024-11-20T17:24:33,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.
2024-11-20T17:24:33,674 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:24:33,675 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/e607e37a0fe7434881e3988686d06aee is 50, key is test_row_0/B:col10/1732123473116/Put/seqid=0 2024-11-20T17:24:33,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:33,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:33,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:33,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:33,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:33,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:33,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/077982b281f24f53a021b8de40753164 is 50, key is test_row_0/A:col10/1732123473133/Put/seqid=0 2024-11-20T17:24:33,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741908_1084 (size=13085) 2024-11-20T17:24:33,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741909_1085 (size=13085) 2024-11-20T17:24:33,707 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/adc42c77124a43c48d835e30358dbe43 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/adc42c77124a43c48d835e30358dbe43 2024-11-20T17:24:33,715 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/e607e37a0fe7434881e3988686d06aee as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e607e37a0fe7434881e3988686d06aee 2024-11-20T17:24:33,717 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/A of 895da877845d8163116b8248e2bc3ffc into adc42c77124a43c48d835e30358dbe43(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:33,717 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:33,717 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/A, priority=12, startTime=1732123473635; duration=0sec 2024-11-20T17:24:33,717 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:33,717 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:A 2024-11-20T17:24:33,717 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:24:33,719 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:24:33,719 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/C is initiating minor compaction (all files) 2024-11-20T17:24:33,720 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/C in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:33,720 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/6b5002b6d4e648c99d70eb0f401b9fe8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/fd5dee64f3cb478fbdec8a53c66641a3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/de21175dbb7547d2b576931f4af72c94, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/4a80f10cd0f44a5ea9e3e2668d68f80a] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=48.7 K 2024-11-20T17:24:33,721 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b5002b6d4e648c99d70eb0f401b9fe8, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732123469988 2024-11-20T17:24:33,722 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd5dee64f3cb478fbdec8a53c66641a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732123470018 2024-11-20T17:24:33,722 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting de21175dbb7547d2b576931f4af72c94, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1732123470652 2024-11-20T17:24:33,723 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a80f10cd0f44a5ea9e3e2668d68f80a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732123473116 2024-11-20T17:24:33,724 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/B of 895da877845d8163116b8248e2bc3ffc into e607e37a0fe7434881e3988686d06aee(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
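For store C the numbers in this selection are mutually consistent: ExploringCompactionPolicy reports 4 files of size 49852 bytes, and 49852 / 1024 ≈ 48.7 K, which matches the totalSize=48.7 K of the minor compaction as well as the per-file sizes in the Compacting lines above (12.6 K + 12.0 K + 12.0 K + 12.0 K ≈ 48.6 K, the small gap being rounding to one decimal place in the log).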
2024-11-20T17:24:33,724 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:33,724 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/B, priority=12, startTime=1732123473635; duration=0sec 2024-11-20T17:24:33,724 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:33,724 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:B 2024-11-20T17:24:33,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741910_1086 (size=12301) 2024-11-20T17:24:33,740 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#C#compaction#72 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:33,741 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/38eddf1958d04294ba8b345aede8126a is 50, key is test_row_0/C:col10/1732123473116/Put/seqid=0 2024-11-20T17:24:33,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:33,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:33,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741911_1087 (size=13085) 2024-11-20T17:24:33,768 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/38eddf1958d04294ba8b345aede8126a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/38eddf1958d04294ba8b345aede8126a 2024-11-20T17:24:33,833 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/C of 895da877845d8163116b8248e2bc3ffc into 38eddf1958d04294ba8b345aede8126a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
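Immediately after these compactions finish, the RPC handlers begin rejecting writes with RegionTooBusyException: the region's combined memstore has grown past its blocking limit, reported here as 512.0 K (far below the production default, so the test evidently runs with deliberately small memstore settings). Every stack trace below bottoms out in HRegion.checkResources. The following is a simplified, self-contained sketch of that check, not the actual HBase source; the method and variable names, and the sample sizes in main, are illustrative only:

import org.apache.hadoop.hbase.RegionTooBusyException;

public class MemstoreLimitSketch {
  // Simplified stand-in for HRegion.checkResources: if the region's memstore is
  // over its blocking limit, the write is refused so a flush can catch up.
  static void checkResources(long memStoreSizeBytes, long blockingLimitBytes)
      throws RegionTooBusyException {
    if (memStoreSizeBytes > blockingLimitBytes) {
      // The real code also asks the flusher to run; callers are expected to retry later.
      throw new RegionTooBusyException(
          "Over memstore limit=" + (blockingLimitBytes / 1024) + ".0 K");
    }
  }

  public static void main(String[] args) throws RegionTooBusyException {
    long limit = 512L * 1024;            // matches "memstore limit=512.0 K" in this run
    checkResources(300L * 1024, limit);  // below the limit: the put would be admitted
    checkResources(600L * 1024, limit);  // above the limit: throws, as in the WARNs below
  }
}

RegionTooBusyException is treated as retryable by the normal HBase client, which is consistent with the same connections reappearing below with new callIds and later deadlines rather than failing outright.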
2024-11-20T17:24:33,833 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:33,833 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/C, priority=12, startTime=1732123473635; duration=0sec 2024-11-20T17:24:33,833 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:33,833 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:C 2024-11-20T17:24:33,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123533844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123533845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123533844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123533846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123533846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123533950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123533952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123533952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123533953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:33,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123533953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:33,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T17:24:34,126 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/077982b281f24f53a021b8de40753164 2024-11-20T17:24:34,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/4388cb4a990d4198ac1d65227bfd873c is 50, key is test_row_0/B:col10/1732123473133/Put/seqid=0 2024-11-20T17:24:34,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123534155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123534155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123534156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,157 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123534157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123534158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741912_1088 (size=12301) 2024-11-20T17:24:34,162 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/4388cb4a990d4198ac1d65227bfd873c 2024-11-20T17:24:34,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/4b5fb0a2136d44249308f546ae5d918d is 50, key is test_row_0/C:col10/1732123473133/Put/seqid=0 2024-11-20T17:24:34,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741913_1089 (size=12301) 2024-11-20T17:24:34,185 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/4b5fb0a2136d44249308f546ae5d918d 2024-11-20T17:24:34,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/077982b281f24f53a021b8de40753164 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/077982b281f24f53a021b8de40753164 2024-11-20T17:24:34,196 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/077982b281f24f53a021b8de40753164, entries=150, sequenceid=344, filesize=12.0 K 2024-11-20T17:24:34,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/4388cb4a990d4198ac1d65227bfd873c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/4388cb4a990d4198ac1d65227bfd873c 2024-11-20T17:24:34,203 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/4388cb4a990d4198ac1d65227bfd873c, entries=150, sequenceid=344, filesize=12.0 K 2024-11-20T17:24:34,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/4b5fb0a2136d44249308f546ae5d918d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/4b5fb0a2136d44249308f546ae5d918d 2024-11-20T17:24:34,210 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/4b5fb0a2136d44249308f546ae5d918d, entries=150, sequenceid=344, filesize=12.0 K 2024-11-20T17:24:34,211 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 895da877845d8163116b8248e2bc3ffc in 537ms, sequenceid=344, compaction requested=false 2024-11-20T17:24:34,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:34,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
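The flush that pid=25 started at 17:24:33,674 has now completed, and its totals line up: each of the three stores flushed 17.89 KB (3 × 17.89 KB ≈ 53.67 KB, matching "dataSize ~53.67 KB/54960" since 54960 / 1024 ≈ 53.67), each produced a 150-entry store file of 12.0 K, and the whole flush took 537 ms. The currentSize=161.02 KB reported in the same line is data that arrived while the flush was running, which is why a further flush is requested moments later and the RegionTooBusyException rejections continue.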
2024-11-20T17:24:34,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-20T17:24:34,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-20T17:24:34,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-20T17:24:34,215 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 846 msec 2024-11-20T17:24:34,216 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 851 msec 2024-11-20T17:24:34,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:34,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-20T17:24:34,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:34,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:34,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:34,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:34,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:34,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:34,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T17:24:34,469 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-20T17:24:34,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123534467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123534469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,471 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:34,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/f32cbeb36bff47cd8ae639a565bdc11d is 50, key is test_row_0/A:col10/1732123473844/Put/seqid=0 2024-11-20T17:24:34,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123534470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-20T17:24:34,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123534471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:24:34,473 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:34,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123534472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,474 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:34,474 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:34,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741914_1090 (size=12301) 2024-11-20T17:24:34,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:24:34,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123534572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123534572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123534573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123534574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123534575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,626 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:34,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T17:24:34,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:34,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:34,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:34,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:34,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:34,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:34,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:24:34,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123534776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123534776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123534777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123534777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,779 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:34,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T17:24:34,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:34,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:34,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:34,780 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:34,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:34,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:34,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:34,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123534780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:34,881 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/f32cbeb36bff47cd8ae639a565bdc11d 2024-11-20T17:24:34,891 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/063c35d6dd1f4271a608ec2467bde6e0 is 50, key is test_row_0/B:col10/1732123473844/Put/seqid=0 2024-11-20T17:24:34,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741915_1091 (size=12301) 2024-11-20T17:24:34,933 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:34,933 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T17:24:34,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:34,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:34,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:34,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:34,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:34,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:24:35,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:35,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123535079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:35,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:35,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123535081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:35,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:35,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123535082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:35,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:35,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123535082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:35,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:35,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123535084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:35,091 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:35,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T17:24:35,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:35,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,244 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:35,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T17:24:35,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:35,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,297 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/063c35d6dd1f4271a608ec2467bde6e0 2024-11-20T17:24:35,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/a910d9c7643f4a31b509fda5140ac1d1 is 50, key is test_row_0/C:col10/1732123473844/Put/seqid=0 2024-11-20T17:24:35,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741916_1092 (size=12301) 2024-11-20T17:24:35,398 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:35,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T17:24:35,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
as already flushing 2024-11-20T17:24:35,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,551 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:35,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T17:24:35,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:35,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:24:35,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:35,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123535584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:35,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:35,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123535585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:35,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:35,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123535586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:35,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:35,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123535588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:35,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:35,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123535589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:35,705 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:35,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T17:24:35,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:35,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,706 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:35,713 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/a910d9c7643f4a31b509fda5140ac1d1 2024-11-20T17:24:35,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/f32cbeb36bff47cd8ae639a565bdc11d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f32cbeb36bff47cd8ae639a565bdc11d 2024-11-20T17:24:35,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f32cbeb36bff47cd8ae639a565bdc11d, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T17:24:35,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/063c35d6dd1f4271a608ec2467bde6e0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/063c35d6dd1f4271a608ec2467bde6e0 2024-11-20T17:24:35,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/063c35d6dd1f4271a608ec2467bde6e0, entries=150, 
sequenceid=377, filesize=12.0 K 2024-11-20T17:24:35,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/a910d9c7643f4a31b509fda5140ac1d1 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a910d9c7643f4a31b509fda5140ac1d1 2024-11-20T17:24:35,742 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a910d9c7643f4a31b509fda5140ac1d1, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T17:24:35,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=33.54 KB/34350 for 895da877845d8163116b8248e2bc3ffc in 1278ms, sequenceid=377, compaction requested=true 2024-11-20T17:24:35,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:35,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:35,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:35,743 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:35,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:35,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:35,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:35,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:35,743 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:35,744 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:35,744 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/A is initiating minor compaction (all files) 2024-11-20T17:24:35,744 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/A in 
TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,744 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:35,744 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/adc42c77124a43c48d835e30358dbe43, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/077982b281f24f53a021b8de40753164, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f32cbeb36bff47cd8ae639a565bdc11d] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=36.8 K 2024-11-20T17:24:35,744 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/B is initiating minor compaction (all files) 2024-11-20T17:24:35,745 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/B in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,745 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e607e37a0fe7434881e3988686d06aee, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/4388cb4a990d4198ac1d65227bfd873c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/063c35d6dd1f4271a608ec2467bde6e0] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=36.8 K 2024-11-20T17:24:35,745 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting adc42c77124a43c48d835e30358dbe43, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732123473116 2024-11-20T17:24:35,745 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting e607e37a0fe7434881e3988686d06aee, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732123473116 2024-11-20T17:24:35,745 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 077982b281f24f53a021b8de40753164, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1732123473129 2024-11-20T17:24:35,746 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 4388cb4a990d4198ac1d65227bfd873c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1732123473129 2024-11-20T17:24:35,746 
DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting f32cbeb36bff47cd8ae639a565bdc11d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732123473844 2024-11-20T17:24:35,746 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 063c35d6dd1f4271a608ec2467bde6e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732123473844 2024-11-20T17:24:35,756 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#B#compaction#78 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:35,757 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/e101c8ab182845eabed3e3dc2bd09625 is 50, key is test_row_0/B:col10/1732123473844/Put/seqid=0 2024-11-20T17:24:35,759 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#A#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:35,760 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/700f56426784471ebf3deff157b15489 is 50, key is test_row_0/A:col10/1732123473844/Put/seqid=0 2024-11-20T17:24:35,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741917_1093 (size=13187) 2024-11-20T17:24:35,777 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/e101c8ab182845eabed3e3dc2bd09625 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e101c8ab182845eabed3e3dc2bd09625 2024-11-20T17:24:35,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741918_1094 (size=13187) 2024-11-20T17:24:35,786 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/B of 895da877845d8163116b8248e2bc3ffc into e101c8ab182845eabed3e3dc2bd09625(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:35,786 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:35,786 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/B, priority=13, startTime=1732123475743; duration=0sec 2024-11-20T17:24:35,786 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:35,786 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:B 2024-11-20T17:24:35,786 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:35,786 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/700f56426784471ebf3deff157b15489 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/700f56426784471ebf3deff157b15489 2024-11-20T17:24:35,789 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:35,789 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/C is initiating minor compaction (all files) 2024-11-20T17:24:35,790 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/C in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:35,790 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/38eddf1958d04294ba8b345aede8126a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/4b5fb0a2136d44249308f546ae5d918d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a910d9c7643f4a31b509fda5140ac1d1] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=36.8 K 2024-11-20T17:24:35,790 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 38eddf1958d04294ba8b345aede8126a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732123473116 2024-11-20T17:24:35,791 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b5fb0a2136d44249308f546ae5d918d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1732123473129 2024-11-20T17:24:35,791 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting a910d9c7643f4a31b509fda5140ac1d1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732123473844 2024-11-20T17:24:35,797 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/A of 895da877845d8163116b8248e2bc3ffc into 700f56426784471ebf3deff157b15489(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:35,797 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:35,797 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/A, priority=13, startTime=1732123475743; duration=0sec 2024-11-20T17:24:35,797 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:35,797 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:A 2024-11-20T17:24:35,806 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#C#compaction#80 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:35,807 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/25b437bedce74c91b315af64f4b9c558 is 50, key is test_row_0/C:col10/1732123473844/Put/seqid=0 2024-11-20T17:24:35,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741919_1095 (size=13187) 2024-11-20T17:24:35,859 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:35,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-20T17:24:35,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:35,860 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T17:24:35,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:35,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:35,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:35,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:35,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:35,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:35,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/c4e2f5ec7fdf4bd29ac658bb9b907f9d is 50, key is test_row_0/A:col10/1732123474468/Put/seqid=0 2024-11-20T17:24:35,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741920_1096 (size=12301) 2024-11-20T17:24:36,228 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/25b437bedce74c91b315af64f4b9c558 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/25b437bedce74c91b315af64f4b9c558 2024-11-20T17:24:36,237 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/C of 895da877845d8163116b8248e2bc3ffc into 25b437bedce74c91b315af64f4b9c558(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:36,237 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:36,237 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/C, priority=13, startTime=1732123475743; duration=0sec 2024-11-20T17:24:36,237 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:36,237 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:C 2024-11-20T17:24:36,272 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/c4e2f5ec7fdf4bd29ac658bb9b907f9d 2024-11-20T17:24:36,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/6bb061651e054b38b6c6d0a69bc49a6e is 50, key is test_row_0/B:col10/1732123474468/Put/seqid=0 2024-11-20T17:24:36,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741921_1097 (size=12301) 2024-11-20T17:24:36,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:24:36,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:36,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:36,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123536614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123536615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123536616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123536617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123536617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,687 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/6bb061651e054b38b6c6d0a69bc49a6e 2024-11-20T17:24:36,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/282728b5d3fb4b8b82eef6026ab4c2e1 is 50, key is test_row_0/C:col10/1732123474468/Put/seqid=0 2024-11-20T17:24:36,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741922_1098 (size=12301) 2024-11-20T17:24:36,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123536720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123536720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123536720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123536721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123536721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123536924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123536924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123536924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:36,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123536924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:36,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T17:24:36,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123536925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293
2024-11-20T17:24:37,108 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/282728b5d3fb4b8b82eef6026ab4c2e1
2024-11-20T17:24:37,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/c4e2f5ec7fdf4bd29ac658bb9b907f9d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/c4e2f5ec7fdf4bd29ac658bb9b907f9d
2024-11-20T17:24:37,119 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/c4e2f5ec7fdf4bd29ac658bb9b907f9d, entries=150, sequenceid=387, filesize=12.0 K
2024-11-20T17:24:37,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/6bb061651e054b38b6c6d0a69bc49a6e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/6bb061651e054b38b6c6d0a69bc49a6e
2024-11-20T17:24:37,125 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/6bb061651e054b38b6c6d0a69bc49a6e, entries=150, sequenceid=387, filesize=12.0 K
2024-11-20T17:24:37,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/282728b5d3fb4b8b82eef6026ab4c2e1 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/282728b5d3fb4b8b82eef6026ab4c2e1
2024-11-20T17:24:37,132 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/282728b5d3fb4b8b82eef6026ab4c2e1, entries=150, sequenceid=387, filesize=12.0 K
2024-11-20T17:24:37,133 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for 895da877845d8163116b8248e2bc3ffc in 1273ms, sequenceid=387, compaction requested=false
2024-11-20T17:24:37,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc:
2024-11-20T17:24:37,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.
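The entries above show one complete flush cycle for region 895da877845d8163116b8248e2bc3ffc: the master-driven flush (pid=26/27) commits one HFile per store (A, B, C) while concurrent writers keep receiving RegionTooBusyException because the region's memstore is over the 512.0 K limit. The sketch below is illustrative only and is not part of this test run; it assumes the standard HBase 2.x client API (ConnectionFactory, Admin.flush, Table.put) and an hbase-site.xml on the classpath pointing at the cluster that produced this log. The table name, row key, and column family/qualifier are taken from the log; the class name, retry count, and sleep values are invented for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch, not taken from the test: flush the table and retry a
// write that the region server rejects for exceeding the memstore limit.
public class FlushAndRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Keep client-side retries low so the server's rejection surfaces quickly.
    conf.setInt("hbase.client.retries.number", 1);
    TableName tableName = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(tableName)) {
      // Ask the master to flush the table, as the procedure logged above did.
      admin.flush(tableName);

      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Bounded retry with linear backoff while the region reports
      // "Over memstore limit=512.0 K". Depending on the retry setting, the
      // server-side RegionTooBusyException may arrive directly or wrapped in
      // RetriesExhaustedException, so both are handled here.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException | RetriesExhaustedException e) {
          if (attempt >= 5) {
            throw e;
          }
          Thread.sleep(100L * attempt); // give the memstore flusher time to drain the region
        }
      }
    }
  }
}

Waiting between attempts matters more than the exact retry count here: the "Finished flush ... in 1273ms" entry above gives a rough idea of how long the region needs before its memstore drops back under the limit.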
2024-11-20T17:24:37,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27
2024-11-20T17:24:37,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=27
2024-11-20T17:24:37,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26
2024-11-20T17:24:37,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6610 sec
2024-11-20T17:24:37,139 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 2.6660 sec
2024-11-20T17:24:37,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc
2024-11-20T17:24:37,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB
2024-11-20T17:24:37,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A
2024-11-20T17:24:37,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:24:37,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B
2024-11-20T17:24:37,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:24:37,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C
2024-11-20T17:24:37,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:24:37,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123537231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123537231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123537232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123537234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123537237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/aefa7bf1af264b3abadcf647f0b8a6fd is 50, key is test_row_0/A:col10/1732123476615/Put/seqid=0 2024-11-20T17:24:37,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741923_1099 (size=12301) 2024-11-20T17:24:37,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123537338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123537338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123537338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123537338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123537338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123537540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123537541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123537541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123537542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123537543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,650 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/aefa7bf1af264b3abadcf647f0b8a6fd 2024-11-20T17:24:37,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/4337b22286514d929b7740a27bbc39f2 is 50, key is test_row_0/B:col10/1732123476615/Put/seqid=0 2024-11-20T17:24:37,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741924_1100 (size=12301) 2024-11-20T17:24:37,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123537845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123537845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123537845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123537846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:37,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:37,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123537847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:38,084 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/4337b22286514d929b7740a27bbc39f2 2024-11-20T17:24:38,097 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/bb61c56304314889ba619e513016a7c0 is 50, key is test_row_0/C:col10/1732123476615/Put/seqid=0 2024-11-20T17:24:38,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741925_1101 (size=12301) 2024-11-20T17:24:38,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:38,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123538351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:38,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:38,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:38,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123538352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:38,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123538351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:38,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:38,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:38,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123538352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:38,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123538352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:38,519 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/bb61c56304314889ba619e513016a7c0 2024-11-20T17:24:38,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/aefa7bf1af264b3abadcf647f0b8a6fd as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/aefa7bf1af264b3abadcf647f0b8a6fd 2024-11-20T17:24:38,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/aefa7bf1af264b3abadcf647f0b8a6fd, entries=150, sequenceid=420, filesize=12.0 K 2024-11-20T17:24:38,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/4337b22286514d929b7740a27bbc39f2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/4337b22286514d929b7740a27bbc39f2 2024-11-20T17:24:38,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/4337b22286514d929b7740a27bbc39f2, entries=150, sequenceid=420, filesize=12.0 K 2024-11-20T17:24:38,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/bb61c56304314889ba619e513016a7c0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/bb61c56304314889ba619e513016a7c0 2024-11-20T17:24:38,543 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/bb61c56304314889ba619e513016a7c0, entries=150, sequenceid=420, filesize=12.0 K 2024-11-20T17:24:38,544 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=13.42 KB/13740 for 895da877845d8163116b8248e2bc3ffc in 1314ms, sequenceid=420, compaction requested=true 2024-11-20T17:24:38,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:38,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:38,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:38,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:38,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:38,544 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:38,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:38,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:38,544 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:38,546 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:38,546 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:38,546 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/A is initiating minor compaction (all files) 2024-11-20T17:24:38,546 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/B is initiating minor compaction (all files) 2024-11-20T17:24:38,546 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/A in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:38,546 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/B in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:38,546 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/700f56426784471ebf3deff157b15489, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/c4e2f5ec7fdf4bd29ac658bb9b907f9d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/aefa7bf1af264b3abadcf647f0b8a6fd] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=36.9 K 2024-11-20T17:24:38,546 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e101c8ab182845eabed3e3dc2bd09625, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/6bb061651e054b38b6c6d0a69bc49a6e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/4337b22286514d929b7740a27bbc39f2] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=36.9 K 2024-11-20T17:24:38,547 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting e101c8ab182845eabed3e3dc2bd09625, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732123473844 2024-11-20T17:24:38,547 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 700f56426784471ebf3deff157b15489, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732123473844 2024-11-20T17:24:38,547 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bb061651e054b38b6c6d0a69bc49a6e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1732123474468 2024-11-20T17:24:38,547 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting c4e2f5ec7fdf4bd29ac658bb9b907f9d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1732123474468 2024-11-20T17:24:38,548 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 4337b22286514d929b7740a27bbc39f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732123476615 2024-11-20T17:24:38,548 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting aefa7bf1af264b3abadcf647f0b8a6fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732123476615 2024-11-20T17:24:38,559 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#B#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:38,559 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#A#compaction#88 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:38,559 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/7b814993c5df4dffbaf9a130154aa42e is 50, key is test_row_0/B:col10/1732123476615/Put/seqid=0 2024-11-20T17:24:38,560 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/cb68e78682324f6fac36d1f76ccc4c85 is 50, key is test_row_0/A:col10/1732123476615/Put/seqid=0 2024-11-20T17:24:38,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741926_1102 (size=13289) 2024-11-20T17:24:38,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741927_1103 (size=13289) 2024-11-20T17:24:38,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T17:24:38,578 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-20T17:24:38,579 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:38,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-20T17:24:38,581 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:38,582 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T17:24:38,582 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:38,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:38,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T17:24:38,735 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:38,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-20T17:24:38,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:38,736 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-11-20T17:24:38,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:38,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:38,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:38,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:38,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:38,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:38,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/f8800fca177a4bcfaa3df0a96e5d8bee is 50, key is test_row_1/A:col10/1732123477232/Put/seqid=0 2024-11-20T17:24:38,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741928_1104 (size=9857) 
2024-11-20T17:24:38,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T17:24:38,980 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/cb68e78682324f6fac36d1f76ccc4c85 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/cb68e78682324f6fac36d1f76ccc4c85 2024-11-20T17:24:38,980 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/7b814993c5df4dffbaf9a130154aa42e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7b814993c5df4dffbaf9a130154aa42e 2024-11-20T17:24:38,988 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/B of 895da877845d8163116b8248e2bc3ffc into 7b814993c5df4dffbaf9a130154aa42e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:38,988 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/A of 895da877845d8163116b8248e2bc3ffc into cb68e78682324f6fac36d1f76ccc4c85(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:38,988 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:38,988 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:38,988 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/A, priority=13, startTime=1732123478544; duration=0sec 2024-11-20T17:24:38,988 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/B, priority=13, startTime=1732123478544; duration=0sec 2024-11-20T17:24:38,988 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:38,988 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:38,988 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:A 2024-11-20T17:24:38,988 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:B 2024-11-20T17:24:38,988 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:38,990 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:38,990 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/C is initiating minor compaction (all files) 2024-11-20T17:24:38,990 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/C in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:38,990 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/25b437bedce74c91b315af64f4b9c558, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/282728b5d3fb4b8b82eef6026ab4c2e1, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/bb61c56304314889ba619e513016a7c0] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=36.9 K 2024-11-20T17:24:38,990 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25b437bedce74c91b315af64f4b9c558, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1732123473844 2024-11-20T17:24:38,991 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 282728b5d3fb4b8b82eef6026ab4c2e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1732123474468 2024-11-20T17:24:38,991 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb61c56304314889ba619e513016a7c0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732123476615 2024-11-20T17:24:39,000 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#C#compaction#90 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:39,001 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/59e1325c0ff54acab6bf4bd59a30fc5a is 50, key is test_row_0/C:col10/1732123476615/Put/seqid=0 2024-11-20T17:24:39,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741929_1105 (size=13289) 2024-11-20T17:24:39,148 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/f8800fca177a4bcfaa3df0a96e5d8bee 2024-11-20T17:24:39,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/9ce4eb13c5f14266a551bddd26901e7c is 50, key is test_row_1/B:col10/1732123477232/Put/seqid=0 2024-11-20T17:24:39,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741930_1106 (size=9857) 2024-11-20T17:24:39,162 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/9ce4eb13c5f14266a551bddd26901e7c 2024-11-20T17:24:39,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/9bbf57abe56a4f33850658cb41057bd2 is 50, key is test_row_1/C:col10/1732123477232/Put/seqid=0 2024-11-20T17:24:39,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T17:24:39,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741931_1107 (size=9857) 2024-11-20T17:24:39,188 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/9bbf57abe56a4f33850658cb41057bd2 2024-11-20T17:24:39,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/f8800fca177a4bcfaa3df0a96e5d8bee as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f8800fca177a4bcfaa3df0a96e5d8bee 2024-11-20T17:24:39,201 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f8800fca177a4bcfaa3df0a96e5d8bee, entries=100, sequenceid=425, filesize=9.6 K 2024-11-20T17:24:39,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/9ce4eb13c5f14266a551bddd26901e7c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/9ce4eb13c5f14266a551bddd26901e7c 2024-11-20T17:24:39,208 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/9ce4eb13c5f14266a551bddd26901e7c, entries=100, sequenceid=425, filesize=9.6 K 2024-11-20T17:24:39,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/9bbf57abe56a4f33850658cb41057bd2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/9bbf57abe56a4f33850658cb41057bd2 2024-11-20T17:24:39,215 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/9bbf57abe56a4f33850658cb41057bd2, entries=100, sequenceid=425, filesize=9.6 K 2024-11-20T17:24:39,218 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for 895da877845d8163116b8248e2bc3ffc in 481ms, sequenceid=425, compaction requested=false 2024-11-20T17:24:39,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:39,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:39,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-20T17:24:39,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-20T17:24:39,221 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-20T17:24:39,221 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 637 msec 2024-11-20T17:24:39,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 643 msec 2024-11-20T17:24:39,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:39,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:24:39,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:39,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:39,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:39,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:39,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:39,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:39,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/58bc67526c464b7cb4ea1dc407b38878 is 50, key is test_row_0/A:col10/1732123479369/Put/seqid=0 2024-11-20T17:24:39,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741932_1108 (size=14741) 2024-11-20T17:24:39,413 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/59e1325c0ff54acab6bf4bd59a30fc5a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/59e1325c0ff54acab6bf4bd59a30fc5a 2024-11-20T17:24:39,420 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/C of 895da877845d8163116b8248e2bc3ffc into 59e1325c0ff54acab6bf4bd59a30fc5a(size=13.0 K), total size for store is 22.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:39,420 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:39,420 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/C, priority=13, startTime=1732123478544; duration=0sec 2024-11-20T17:24:39,420 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:39,420 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:C 2024-11-20T17:24:39,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123539422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123539422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123539423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123539423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123539425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123539528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123539528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123539528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123539528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123539528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-20T17:24:39,685 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-20T17:24:39,686 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:39,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-20T17:24:39,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T17:24:39,688 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:39,689 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:39,689 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:39,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123539731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123539731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123539731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123539732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:39,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123539733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:39,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/58bc67526c464b7cb4ea1dc407b38878 2024-11-20T17:24:39,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T17:24:39,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/86236fc56f1c4e4491486e79b9f1e4ce is 50, key is test_row_0/B:col10/1732123479369/Put/seqid=0 2024-11-20T17:24:39,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741933_1109 (size=12301) 2024-11-20T17:24:39,840 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:39,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:24:39,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:39,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:39,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:39,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:39,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:39,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T17:24:39,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:39,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:24:39,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:39,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:39,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:39,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:39,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:40,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123540034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,036 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123540035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123540035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123540036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123540036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,146 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:40,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:24:40,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:40,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:40,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:40,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:40,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:40,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:40,202 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/86236fc56f1c4e4491486e79b9f1e4ce 2024-11-20T17:24:40,211 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/b3979dfbfe0d4a2287dfc737d0f2fdb8 is 50, key is test_row_0/C:col10/1732123479369/Put/seqid=0 2024-11-20T17:24:40,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741934_1110 (size=12301) 2024-11-20T17:24:40,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/b3979dfbfe0d4a2287dfc737d0f2fdb8 2024-11-20T17:24:40,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/58bc67526c464b7cb4ea1dc407b38878 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/58bc67526c464b7cb4ea1dc407b38878 2024-11-20T17:24:40,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/58bc67526c464b7cb4ea1dc407b38878, entries=200, sequenceid=439, filesize=14.4 K 2024-11-20T17:24:40,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/86236fc56f1c4e4491486e79b9f1e4ce as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/86236fc56f1c4e4491486e79b9f1e4ce 2024-11-20T17:24:40,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/86236fc56f1c4e4491486e79b9f1e4ce, entries=150, sequenceid=439, filesize=12.0 K 2024-11-20T17:24:40,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/b3979dfbfe0d4a2287dfc737d0f2fdb8 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/b3979dfbfe0d4a2287dfc737d0f2fdb8 2024-11-20T17:24:40,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/b3979dfbfe0d4a2287dfc737d0f2fdb8, entries=150, sequenceid=439, filesize=12.0 K 2024-11-20T17:24:40,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 895da877845d8163116b8248e2bc3ffc in 873ms, sequenceid=439, compaction requested=true 2024-11-20T17:24:40,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:40,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:40,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:40,242 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:40,242 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:40,242 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:40,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:40,243 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:40,243 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:40,244 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:40,244 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37887 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:40,244 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/B is initiating minor compaction (all files) 2024-11-20T17:24:40,244 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/A is initiating minor compaction (all files) 2024-11-20T17:24:40,244 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/A in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:40,244 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/B in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:40,244 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/cb68e78682324f6fac36d1f76ccc4c85, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f8800fca177a4bcfaa3df0a96e5d8bee, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/58bc67526c464b7cb4ea1dc407b38878] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=37.0 K 2024-11-20T17:24:40,244 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7b814993c5df4dffbaf9a130154aa42e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/9ce4eb13c5f14266a551bddd26901e7c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/86236fc56f1c4e4491486e79b9f1e4ce] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=34.6 K 2024-11-20T17:24:40,245 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 7b814993c5df4dffbaf9a130154aa42e, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732123476615 2024-11-20T17:24:40,245 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb68e78682324f6fac36d1f76ccc4c85, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732123476615 2024-11-20T17:24:40,245 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ce4eb13c5f14266a551bddd26901e7c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1732123477232 2024-11-20T17:24:40,245 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8800fca177a4bcfaa3df0a96e5d8bee, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1732123477232 2024-11-20T17:24:40,246 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 86236fc56f1c4e4491486e79b9f1e4ce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732123479362 2024-11-20T17:24:40,246 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58bc67526c464b7cb4ea1dc407b38878, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732123479362 2024-11-20T17:24:40,260 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#B#compaction#96 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:40,260 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#A#compaction#97 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:40,260 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/2c348083c4f8465ebcdd62d37b2f5980 is 50, key is test_row_0/B:col10/1732123479369/Put/seqid=0 2024-11-20T17:24:40,260 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/1024f3c3bb3d4edba059e670b9ce5ce0 is 50, key is test_row_0/A:col10/1732123479369/Put/seqid=0 2024-11-20T17:24:40,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741935_1111 (size=13391) 2024-11-20T17:24:40,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741936_1112 (size=13391) 2024-11-20T17:24:40,281 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/2c348083c4f8465ebcdd62d37b2f5980 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2c348083c4f8465ebcdd62d37b2f5980 2024-11-20T17:24:40,283 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/1024f3c3bb3d4edba059e670b9ce5ce0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/1024f3c3bb3d4edba059e670b9ce5ce0 2024-11-20T17:24:40,288 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/B of 895da877845d8163116b8248e2bc3ffc into 2c348083c4f8465ebcdd62d37b2f5980(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
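The entries above show the ExploringCompactionPolicy selecting all three flushed files for stores A and B and the first of those minor compactions completing. A minimal sketch, assuming the standard HBase 2.x Java client is on the classpath, of how the same flush and compaction can be requested through the public Admin API; the table name is taken from the log, while the connection settings and class name are assumptions for illustration only:

// Sketch only: table name comes from the log above; connection configuration is assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlushAndCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees"); // table seen in the log entries above
      admin.flush(table);    // flush memstores to HFiles, as the MemStoreFlusher entries above record
      admin.compact(table);  // ask region servers for a compaction, like the minor compactions logged above
    }
  }
}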
2024-11-20T17:24:40,289 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:40,289 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/B, priority=13, startTime=1732123480242; duration=0sec 2024-11-20T17:24:40,289 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:40,289 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:B 2024-11-20T17:24:40,289 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:40,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T17:24:40,291 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:40,291 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/C is initiating minor compaction (all files) 2024-11-20T17:24:40,291 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/C in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:40,291 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/59e1325c0ff54acab6bf4bd59a30fc5a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/9bbf57abe56a4f33850658cb41057bd2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/b3979dfbfe0d4a2287dfc737d0f2fdb8] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=34.6 K 2024-11-20T17:24:40,291 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/A of 895da877845d8163116b8248e2bc3ffc into 1024f3c3bb3d4edba059e670b9ce5ce0(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:40,291 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:40,292 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/A, priority=13, startTime=1732123480242; duration=0sec 2024-11-20T17:24:40,292 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:40,292 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:A 2024-11-20T17:24:40,293 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 59e1325c0ff54acab6bf4bd59a30fc5a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732123476615 2024-11-20T17:24:40,293 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bbf57abe56a4f33850658cb41057bd2, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1732123477232 2024-11-20T17:24:40,293 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting b3979dfbfe0d4a2287dfc737d0f2fdb8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732123479362 2024-11-20T17:24:40,301 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:40,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-20T17:24:40,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
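At this point the master re-dispatches the flush procedure (pid=31) that failed earlier, while client writers keep loading the region; the RegionTooBusyException warnings that follow ("Over memstore limit=512.0 K") are memstore backpressure, which the default HBase client treats as retryable. A minimal client-side sketch, assuming the standard HBase 2.x Java client; the retry count, pause, and cell value are illustrative assumptions, not settings or data used by this test:

// Sketch only: row key, column family, and qualifier are taken from the log above;
// the retry/pause values and the written value are assumptions for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 15); // how many times a retryable failure such as RegionTooBusyException is retried
    conf.setLong("hbase.client.pause", 100);        // base backoff in milliseconds between retries
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));                            // row key seen in the log
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // retries internally with backoff until the region accepts the write or retries run out
    }
  }
}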
2024-11-20T17:24:40,302 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:24:40,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:40,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:40,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:40,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:40,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:40,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:40,303 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#C#compaction#98 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:40,303 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/daf0923166cb40fb93d2b3e1ad31afd8 is 50, key is test_row_0/C:col10/1732123479369/Put/seqid=0 2024-11-20T17:24:40,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/afc805482e1d41aba0e4277e9780f226 is 50, key is test_row_0/A:col10/1732123479422/Put/seqid=0 2024-11-20T17:24:40,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741937_1113 (size=13391) 2024-11-20T17:24:40,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741938_1114 (size=12301) 2024-11-20T17:24:40,316 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/daf0923166cb40fb93d2b3e1ad31afd8 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/daf0923166cb40fb93d2b3e1ad31afd8 2024-11-20T17:24:40,323 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/C of 895da877845d8163116b8248e2bc3ffc into daf0923166cb40fb93d2b3e1ad31afd8(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:40,323 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:40,323 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/C, priority=13, startTime=1732123480243; duration=0sec 2024-11-20T17:24:40,323 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:40,323 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:C 2024-11-20T17:24:40,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:40,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:40,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123540546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123540544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123540546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123540548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123540549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123540649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123540649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123540649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123540650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123540652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,715 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=466 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/afc805482e1d41aba0e4277e9780f226 2024-11-20T17:24:40,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/c6a119d861cb422abff45e180d417dcf is 50, key is test_row_0/B:col10/1732123479422/Put/seqid=0 2024-11-20T17:24:40,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741939_1115 (size=12301) 2024-11-20T17:24:40,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T17:24:40,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123540853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123540854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123540854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123540855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:40,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123540856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,131 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=466 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/c6a119d861cb422abff45e180d417dcf 2024-11-20T17:24:41,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/485e54fba8484600a355538af2e48d7a is 50, key is test_row_0/C:col10/1732123479422/Put/seqid=0 2024-11-20T17:24:41,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741940_1116 (size=12301) 2024-11-20T17:24:41,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123541156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123541156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123541157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123541160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123541161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,553 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=466 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/485e54fba8484600a355538af2e48d7a 2024-11-20T17:24:41,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/afc805482e1d41aba0e4277e9780f226 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/afc805482e1d41aba0e4277e9780f226 2024-11-20T17:24:41,565 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/afc805482e1d41aba0e4277e9780f226, entries=150, sequenceid=466, filesize=12.0 K 2024-11-20T17:24:41,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/c6a119d861cb422abff45e180d417dcf as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c6a119d861cb422abff45e180d417dcf 2024-11-20T17:24:41,571 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c6a119d861cb422abff45e180d417dcf, entries=150, sequenceid=466, filesize=12.0 K 2024-11-20T17:24:41,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/485e54fba8484600a355538af2e48d7a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/485e54fba8484600a355538af2e48d7a 2024-11-20T17:24:41,579 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/485e54fba8484600a355538af2e48d7a, entries=150, sequenceid=466, filesize=12.0 K 2024-11-20T17:24:41,580 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 895da877845d8163116b8248e2bc3ffc in 1278ms, sequenceid=466, compaction requested=false 2024-11-20T17:24:41,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:41,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:41,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-20T17:24:41,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-20T17:24:41,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-20T17:24:41,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8930 sec 2024-11-20T17:24:41,585 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 1.8980 sec 2024-11-20T17:24:41,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:41,661 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:24:41,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:41,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:41,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:41,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:41,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:41,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:41,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/7712bc0826dc49978afdd9bb1facf869 is 50, key is test_row_0/A:col10/1732123480545/Put/seqid=0 2024-11-20T17:24:41,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741941_1117 (size=14741) 2024-11-20T17:24:41,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123541710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123541710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123541711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123541711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123541712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T17:24:41,792 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-20T17:24:41,793 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:41,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-20T17:24:41,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T17:24:41,795 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:41,796 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:41,796 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:41,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123541814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123541816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123541817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123541817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:41,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123541818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:41,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T17:24:41,948 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:41,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T17:24:41,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:41,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:41,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:41,949 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:41,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:41,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123542016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123542018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123542021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123542022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123542022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/7712bc0826dc49978afdd9bb1facf869 2024-11-20T17:24:42,088 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/f63079396fd447eb88aee6370fe5e044 is 50, key is test_row_0/B:col10/1732123480545/Put/seqid=0 2024-11-20T17:24:42,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741942_1118 (size=12301) 2024-11-20T17:24:42,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T17:24:42,101 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:42,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T17:24:42,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:42,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:42,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,254 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:42,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T17:24:42,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:42,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,255 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123542320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123542322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123542324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123542324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123542325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T17:24:42,407 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:42,407 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T17:24:42,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:42,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/f63079396fd447eb88aee6370fe5e044 2024-11-20T17:24:42,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/ee1c67223a9d4ca2a937ab627f56f3d0 is 50, key is test_row_0/C:col10/1732123480545/Put/seqid=0 2024-11-20T17:24:42,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741943_1119 (size=12301) 2024-11-20T17:24:42,561 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:42,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T17:24:42,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
as already flushing 2024-11-20T17:24:42,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,561 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,652 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51cab508 to 127.0.0.1:56028 2024-11-20T17:24:42,653 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:42,653 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4bf8e82a to 127.0.0.1:56028 2024-11-20T17:24:42,653 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:42,654 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3ba01639 to 127.0.0.1:56028 2024-11-20T17:24:42,654 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:42,654 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x24890c79 to 127.0.0.1:56028 2024-11-20T17:24:42,654 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:42,714 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:42,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T17:24:42,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
as already flushing 2024-11-20T17:24:42,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,715 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32832 deadline: 1732123542824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54148 deadline: 1732123542826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54166 deadline: 1732123542827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:54156 deadline: 1732123542827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:42,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32822 deadline: 1732123542829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:42,866 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:42,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T17:24:42,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:42,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:42,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T17:24:42,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/ee1c67223a9d4ca2a937ab627f56f3d0 2024-11-20T17:24:42,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/7712bc0826dc49978afdd9bb1facf869 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/7712bc0826dc49978afdd9bb1facf869 2024-11-20T17:24:42,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/7712bc0826dc49978afdd9bb1facf869, entries=200, sequenceid=480, filesize=14.4 K 2024-11-20T17:24:42,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/f63079396fd447eb88aee6370fe5e044 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/f63079396fd447eb88aee6370fe5e044 2024-11-20T17:24:42,932 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/f63079396fd447eb88aee6370fe5e044, entries=150, sequenceid=480, filesize=12.0 K 2024-11-20T17:24:42,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/ee1c67223a9d4ca2a937ab627f56f3d0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ee1c67223a9d4ca2a937ab627f56f3d0 2024-11-20T17:24:42,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ee1c67223a9d4ca2a937ab627f56f3d0, entries=150, sequenceid=480, filesize=12.0 K 2024-11-20T17:24:42,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 895da877845d8163116b8248e2bc3ffc in 1278ms, sequenceid=480, compaction requested=true 2024-11-20T17:24:42,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:42,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:42,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:42,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:42,938 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:42,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:42,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 895da877845d8163116b8248e2bc3ffc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:42,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:42,938 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:42,939 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:42,939 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:42,939 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/A is initiating minor compaction (all files) 2024-11-20T17:24:42,939 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/B is initiating minor compaction (all files) 2024-11-20T17:24:42,939 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/A in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,939 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/B in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:42,939 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/1024f3c3bb3d4edba059e670b9ce5ce0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/afc805482e1d41aba0e4277e9780f226, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/7712bc0826dc49978afdd9bb1facf869] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=39.5 K 2024-11-20T17:24:42,939 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2c348083c4f8465ebcdd62d37b2f5980, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c6a119d861cb422abff45e180d417dcf, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/f63079396fd447eb88aee6370fe5e044] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=37.1 K 2024-11-20T17:24:42,940 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1024f3c3bb3d4edba059e670b9ce5ce0, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732123479362 2024-11-20T17:24:42,940 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c348083c4f8465ebcdd62d37b2f5980, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732123479362 2024-11-20T17:24:42,940 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting afc805482e1d41aba0e4277e9780f226, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=466, earliestPutTs=1732123479416 2024-11-20T17:24:42,940 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): 
Compacting c6a119d861cb422abff45e180d417dcf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=466, earliestPutTs=1732123479416 2024-11-20T17:24:42,940 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7712bc0826dc49978afdd9bb1facf869, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732123480541 2024-11-20T17:24:42,940 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting f63079396fd447eb88aee6370fe5e044, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732123480541 2024-11-20T17:24:42,949 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#B#compaction#105 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:42,949 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#A#compaction#106 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:42,950 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/0eeb005b56274067aa4305674702a630 is 50, key is test_row_0/A:col10/1732123480545/Put/seqid=0 2024-11-20T17:24:42,950 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/7cb70f17d2cf4bb6af9fb371cbdfde14 is 50, key is test_row_0/B:col10/1732123480545/Put/seqid=0 2024-11-20T17:24:42,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741944_1120 (size=13493) 2024-11-20T17:24:42,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741945_1121 (size=13493) 2024-11-20T17:24:43,020 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:43,021 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-20T17:24:43,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:43,021 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:24:43,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:43,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:43,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:43,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:43,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:43,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:43,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/0f56a04e9eaa41c9a1f95b752217fd2a is 50, key is test_row_0/A:col10/1732123481704/Put/seqid=0 2024-11-20T17:24:43,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741946_1122 (size=12301) 2024-11-20T17:24:43,361 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/0eeb005b56274067aa4305674702a630 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/0eeb005b56274067aa4305674702a630 2024-11-20T17:24:43,361 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/7cb70f17d2cf4bb6af9fb371cbdfde14 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7cb70f17d2cf4bb6af9fb371cbdfde14 2024-11-20T17:24:43,366 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/A of 895da877845d8163116b8248e2bc3ffc into 0eeb005b56274067aa4305674702a630(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:43,366 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/B of 895da877845d8163116b8248e2bc3ffc into 7cb70f17d2cf4bb6af9fb371cbdfde14(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:43,366 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:43,366 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:43,366 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/A, priority=13, startTime=1732123482938; duration=0sec 2024-11-20T17:24:43,366 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/B, priority=13, startTime=1732123482938; duration=0sec 2024-11-20T17:24:43,366 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:43,366 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:A 2024-11-20T17:24:43,367 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:43,367 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:43,367 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:B 2024-11-20T17:24:43,368 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:43,368 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 895da877845d8163116b8248e2bc3ffc/C is initiating minor compaction (all files) 2024-11-20T17:24:43,368 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 895da877845d8163116b8248e2bc3ffc/C in TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:43,368 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/daf0923166cb40fb93d2b3e1ad31afd8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/485e54fba8484600a355538af2e48d7a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ee1c67223a9d4ca2a937ab627f56f3d0] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp, totalSize=37.1 K 2024-11-20T17:24:43,368 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting daf0923166cb40fb93d2b3e1ad31afd8, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732123479362 2024-11-20T17:24:43,369 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 485e54fba8484600a355538af2e48d7a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=466, earliestPutTs=1732123479416 2024-11-20T17:24:43,369 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee1c67223a9d4ca2a937ab627f56f3d0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1732123480541 2024-11-20T17:24:43,378 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 895da877845d8163116b8248e2bc3ffc#C#compaction#108 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:43,378 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/93601844cfbb4b5686da203986127d1c is 50, key is test_row_0/C:col10/1732123480545/Put/seqid=0 2024-11-20T17:24:43,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741947_1123 (size=13493) 2024-11-20T17:24:43,431 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/0f56a04e9eaa41c9a1f95b752217fd2a 2024-11-20T17:24:43,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/6666c7ee813f4d55bead9d2a85f9d1c6 is 50, key is test_row_0/B:col10/1732123481704/Put/seqid=0 2024-11-20T17:24:43,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741948_1124 (size=12301) 2024-11-20T17:24:43,697 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T17:24:43,804 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/93601844cfbb4b5686da203986127d1c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/93601844cfbb4b5686da203986127d1c 2024-11-20T17:24:43,810 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 895da877845d8163116b8248e2bc3ffc/C of 895da877845d8163116b8248e2bc3ffc into 93601844cfbb4b5686da203986127d1c(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:43,810 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:43,810 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc., storeName=895da877845d8163116b8248e2bc3ffc/C, priority=13, startTime=1732123482938; duration=0sec 2024-11-20T17:24:43,811 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:43,811 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 895da877845d8163116b8248e2bc3ffc:C 2024-11-20T17:24:43,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:43,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. as already flushing 2024-11-20T17:24:43,829 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x67f7d3d3 to 127.0.0.1:56028 2024-11-20T17:24:43,830 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:43,832 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x28808bb9 to 127.0.0.1:56028 2024-11-20T17:24:43,832 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:43,834 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x251efa5e to 127.0.0.1:56028 2024-11-20T17:24:43,834 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x774bf929 to 127.0.0.1:56028 2024-11-20T17:24:43,834 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:43,834 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:43,835 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x54af89df to 127.0.0.1:56028 2024-11-20T17:24:43,835 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:43,844 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/6666c7ee813f4d55bead9d2a85f9d1c6 2024-11-20T17:24:43,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/83e52aa32eb749dd96b38aab9e2c1735 is 50, key is test_row_0/C:col10/1732123481704/Put/seqid=0 2024-11-20T17:24:43,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741949_1125 (size=12301) 2024-11-20T17:24:43,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=32 2024-11-20T17:24:44,256 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/83e52aa32eb749dd96b38aab9e2c1735 2024-11-20T17:24:44,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/0f56a04e9eaa41c9a1f95b752217fd2a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/0f56a04e9eaa41c9a1f95b752217fd2a 2024-11-20T17:24:44,266 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/0f56a04e9eaa41c9a1f95b752217fd2a, entries=150, sequenceid=504, filesize=12.0 K 2024-11-20T17:24:44,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/6666c7ee813f4d55bead9d2a85f9d1c6 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/6666c7ee813f4d55bead9d2a85f9d1c6 2024-11-20T17:24:44,271 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/6666c7ee813f4d55bead9d2a85f9d1c6, entries=150, sequenceid=504, filesize=12.0 K 2024-11-20T17:24:44,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/83e52aa32eb749dd96b38aab9e2c1735 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/83e52aa32eb749dd96b38aab9e2c1735 2024-11-20T17:24:44,276 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/83e52aa32eb749dd96b38aab9e2c1735, entries=150, sequenceid=504, filesize=12.0 K 2024-11-20T17:24:44,277 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=33.54 KB/34350 for 
895da877845d8163116b8248e2bc3ffc in 1256ms, sequenceid=504, compaction requested=false 2024-11-20T17:24:44,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:44,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:44,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-20T17:24:44,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-20T17:24:44,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-20T17:24:44,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4820 sec 2024-11-20T17:24:44,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 2.4870 sec 2024-11-20T17:24:45,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-20T17:24:45,900 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 83 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 82 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 82 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7613 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7370 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3224 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9668 rows 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3239 2024-11-20T17:24:45,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9711 rows 2024-11-20T17:24:45,901 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:24:45,901 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x038196d7 to 127.0.0.1:56028 2024-11-20T17:24:45,901 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:24:45,903 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T17:24:45,907 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T17:24:45,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:45,917 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123485917"}]},"ts":"1732123485917"} 2024-11-20T17:24:45,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T17:24:45,919 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T17:24:45,921 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T17:24:45,922 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:24:45,926 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=895da877845d8163116b8248e2bc3ffc, UNASSIGN}] 2024-11-20T17:24:45,927 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=895da877845d8163116b8248e2bc3ffc, UNASSIGN 2024-11-20T17:24:45,927 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=895da877845d8163116b8248e2bc3ffc, regionState=CLOSING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:24:45,928 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:24:45,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure 895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:24:46,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T17:24:46,084 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:46,086 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:46,086 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:24:46,087 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing 895da877845d8163116b8248e2bc3ffc, disabling compactions & flushes 2024-11-20T17:24:46,087 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:46,087 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:46,087 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. after waiting 0 ms 2024-11-20T17:24:46,087 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 
2024-11-20T17:24:46,087 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(2837): Flushing 895da877845d8163116b8248e2bc3ffc 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T17:24:46,087 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=A 2024-11-20T17:24:46,087 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:46,087 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=B 2024-11-20T17:24:46,087 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:46,087 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 895da877845d8163116b8248e2bc3ffc, store=C 2024-11-20T17:24:46,087 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:46,091 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/19251797178d40cfb604f0cf5e7b386a is 50, key is test_row_0/A:col10/1732123483833/Put/seqid=0 2024-11-20T17:24:46,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741950_1126 (size=12301) 2024-11-20T17:24:46,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T17:24:46,496 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/19251797178d40cfb604f0cf5e7b386a 2024-11-20T17:24:46,504 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/b052232ebd7a474ab59b3f4e8c611946 is 50, key is test_row_0/B:col10/1732123483833/Put/seqid=0 2024-11-20T17:24:46,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741951_1127 (size=12301) 2024-11-20T17:24:46,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T17:24:46,909 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 
{event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/b052232ebd7a474ab59b3f4e8c611946 2024-11-20T17:24:46,916 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/66fedda0837f4f2e813e1bf40770b8a9 is 50, key is test_row_0/C:col10/1732123483833/Put/seqid=0 2024-11-20T17:24:46,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741952_1128 (size=12301) 2024-11-20T17:24:47,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T17:24:47,321 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=515 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/66fedda0837f4f2e813e1bf40770b8a9 2024-11-20T17:24:47,327 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/A/19251797178d40cfb604f0cf5e7b386a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/19251797178d40cfb604f0cf5e7b386a 2024-11-20T17:24:47,331 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/19251797178d40cfb604f0cf5e7b386a, entries=150, sequenceid=515, filesize=12.0 K 2024-11-20T17:24:47,332 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/B/b052232ebd7a474ab59b3f4e8c611946 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/b052232ebd7a474ab59b3f4e8c611946 2024-11-20T17:24:47,337 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/b052232ebd7a474ab59b3f4e8c611946, entries=150, sequenceid=515, filesize=12.0 K 2024-11-20T17:24:47,338 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/.tmp/C/66fedda0837f4f2e813e1bf40770b8a9 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/66fedda0837f4f2e813e1bf40770b8a9 2024-11-20T17:24:47,343 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/66fedda0837f4f2e813e1bf40770b8a9, entries=150, sequenceid=515, filesize=12.0 K 2024-11-20T17:24:47,344 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 895da877845d8163116b8248e2bc3ffc in 1257ms, sequenceid=515, compaction requested=true 2024-11-20T17:24:47,345 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/c4511284fa1943c9ac72520dcc9cb24b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/110e6da72e2245988fe11477f245e351, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/73e6ef050f224b3687b038fd88e053d5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d3a55a676f6a43d6b39e3266be49f757, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/2a622620e5d8451d87a47ee7301752cd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/52bd35798d0445b5a216f04b9e9bf3c7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/98aaeae59ff242ccbedba70e3a0db508, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/1cbcbdd91c4d45188863b7688a4f271d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/9166646d83ff44bf804bc9a645f65564, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/8bc3069e9f56491fad6bb59cc415f18b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/31746a004a254e3182af394d517df7ca, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/a09855b467c34b8a8988338b4154e829, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/3439c30e0bb341d086856796fce9c823, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/874fe7ca24554e2988c36382ee8bf026, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/bb7b06da475a49a2813626cde68abc3f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/7da581a06b694ba69b59237fa8553f83, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/337cb931df7f41a4a9263401d61fd3e5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f0d9a81294c64a748f14461eac0dd78b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d939a972f016433a8a0fbf7cd735b3f6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/2ec5b4080e384eaeb6ed8bfd72a750e0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/b5618eafa8f94188bbefb5549656d7f6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/38360526ace144eb86f4a40a86250eb5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/adc42c77124a43c48d835e30358dbe43, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d1c75eb0303444a594aa7160e8ffbe3f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/077982b281f24f53a021b8de40753164, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/700f56426784471ebf3deff157b15489, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f32cbeb36bff47cd8ae639a565bdc11d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/c4e2f5ec7fdf4bd29ac658bb9b907f9d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/cb68e78682324f6fac36d1f76ccc4c85, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/aefa7bf1af264b3abadcf647f0b8a6fd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f8800fca177a4bcfaa3df0a96e5d8bee, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/58bc67526c464b7cb4ea1dc407b38878, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/1024f3c3bb3d4edba059e670b9ce5ce0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/afc805482e1d41aba0e4277e9780f226, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/7712bc0826dc49978afdd9bb1facf869] to archive 2024-11-20T17:24:47,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:24:47,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/c4511284fa1943c9ac72520dcc9cb24b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/c4511284fa1943c9ac72520dcc9cb24b 2024-11-20T17:24:47,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/110e6da72e2245988fe11477f245e351 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/110e6da72e2245988fe11477f245e351 2024-11-20T17:24:47,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/73e6ef050f224b3687b038fd88e053d5 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/73e6ef050f224b3687b038fd88e053d5 2024-11-20T17:24:47,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d3a55a676f6a43d6b39e3266be49f757 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d3a55a676f6a43d6b39e3266be49f757 2024-11-20T17:24:47,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/2a622620e5d8451d87a47ee7301752cd to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/2a622620e5d8451d87a47ee7301752cd 2024-11-20T17:24:47,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/52bd35798d0445b5a216f04b9e9bf3c7 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/52bd35798d0445b5a216f04b9e9bf3c7 2024-11-20T17:24:47,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/98aaeae59ff242ccbedba70e3a0db508 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/98aaeae59ff242ccbedba70e3a0db508 2024-11-20T17:24:47,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/1cbcbdd91c4d45188863b7688a4f271d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/1cbcbdd91c4d45188863b7688a4f271d 2024-11-20T17:24:47,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/9166646d83ff44bf804bc9a645f65564 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/9166646d83ff44bf804bc9a645f65564 2024-11-20T17:24:47,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/8bc3069e9f56491fad6bb59cc415f18b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/8bc3069e9f56491fad6bb59cc415f18b 2024-11-20T17:24:47,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/31746a004a254e3182af394d517df7ca to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/31746a004a254e3182af394d517df7ca 2024-11-20T17:24:47,372 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/a09855b467c34b8a8988338b4154e829 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/a09855b467c34b8a8988338b4154e829 2024-11-20T17:24:47,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/3439c30e0bb341d086856796fce9c823 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/3439c30e0bb341d086856796fce9c823 2024-11-20T17:24:47,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/874fe7ca24554e2988c36382ee8bf026 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/874fe7ca24554e2988c36382ee8bf026 2024-11-20T17:24:47,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/bb7b06da475a49a2813626cde68abc3f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/bb7b06da475a49a2813626cde68abc3f 2024-11-20T17:24:47,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/7da581a06b694ba69b59237fa8553f83 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/7da581a06b694ba69b59237fa8553f83 2024-11-20T17:24:47,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/337cb931df7f41a4a9263401d61fd3e5 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/337cb931df7f41a4a9263401d61fd3e5 2024-11-20T17:24:47,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f0d9a81294c64a748f14461eac0dd78b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f0d9a81294c64a748f14461eac0dd78b 2024-11-20T17:24:47,382 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d939a972f016433a8a0fbf7cd735b3f6 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d939a972f016433a8a0fbf7cd735b3f6 2024-11-20T17:24:47,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/2ec5b4080e384eaeb6ed8bfd72a750e0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/2ec5b4080e384eaeb6ed8bfd72a750e0 2024-11-20T17:24:47,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/b5618eafa8f94188bbefb5549656d7f6 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/b5618eafa8f94188bbefb5549656d7f6 2024-11-20T17:24:47,386 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/38360526ace144eb86f4a40a86250eb5 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/38360526ace144eb86f4a40a86250eb5 2024-11-20T17:24:47,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/adc42c77124a43c48d835e30358dbe43 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/adc42c77124a43c48d835e30358dbe43 2024-11-20T17:24:47,388 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d1c75eb0303444a594aa7160e8ffbe3f to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/d1c75eb0303444a594aa7160e8ffbe3f 2024-11-20T17:24:47,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/077982b281f24f53a021b8de40753164 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/077982b281f24f53a021b8de40753164 2024-11-20T17:24:47,391 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/700f56426784471ebf3deff157b15489 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/700f56426784471ebf3deff157b15489 2024-11-20T17:24:47,392 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f32cbeb36bff47cd8ae639a565bdc11d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f32cbeb36bff47cd8ae639a565bdc11d 2024-11-20T17:24:47,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/c4e2f5ec7fdf4bd29ac658bb9b907f9d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/c4e2f5ec7fdf4bd29ac658bb9b907f9d 2024-11-20T17:24:47,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/cb68e78682324f6fac36d1f76ccc4c85 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/cb68e78682324f6fac36d1f76ccc4c85 2024-11-20T17:24:47,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/aefa7bf1af264b3abadcf647f0b8a6fd to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/aefa7bf1af264b3abadcf647f0b8a6fd 2024-11-20T17:24:47,398 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f8800fca177a4bcfaa3df0a96e5d8bee to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/f8800fca177a4bcfaa3df0a96e5d8bee 2024-11-20T17:24:47,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/58bc67526c464b7cb4ea1dc407b38878 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/58bc67526c464b7cb4ea1dc407b38878 2024-11-20T17:24:47,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/1024f3c3bb3d4edba059e670b9ce5ce0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/1024f3c3bb3d4edba059e670b9ce5ce0 2024-11-20T17:24:47,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/afc805482e1d41aba0e4277e9780f226 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/afc805482e1d41aba0e4277e9780f226 2024-11-20T17:24:47,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/7712bc0826dc49978afdd9bb1facf869 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/7712bc0826dc49978afdd9bb1facf869 2024-11-20T17:24:47,417 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/35f996dcb0774246a174b37bf3006a77, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/8b42c94200cc442c8d208a1458b0957f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/41cb0279f5034b1fbddc9e78dea3942e, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/1cbaa31bdafa49c3a214af353e4cb29b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c1eec70ce7b149b4b4d54856dc98e480, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/8505e4f338af448caa18e8b72e5870d3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/0e000fcbd53a488a8579f5876681e30c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/14451cf7d6a948ea874951526cdd99d7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7eefeedcba5d486dabb56e9612439365, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/5ab7e4b084be4953b4c1100fc836867b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/ad1e2ebc371c49e0af292510037500fb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c2479c79ff6c4247a00b2c3c3d1fe096, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/ed065afd4831452e9451d3a6987fbb91, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2ec031c2b15d422faf9ee7f1eabd022c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/bef7de201b30464a88ffa2c06b0ea387, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c6d393fa528a416d96833df5163fb87e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/9dba84434c364406ac32986e5350efae, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/1b2b0627e28a4d9aa673cc5f8c26c6c2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/468413d9cbc8434fa3c847b3fcd727bd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2b27402dc7694dd48cabbbc5591f3519, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/88493592d3114c87a88145afa2ff6ede, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e3fb7dd468604fad93769fd545250106, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e607e37a0fe7434881e3988686d06aee, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/530984e7b59a4745a928867dfe3f4247, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/4388cb4a990d4198ac1d65227bfd873c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e101c8ab182845eabed3e3dc2bd09625, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/063c35d6dd1f4271a608ec2467bde6e0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/6bb061651e054b38b6c6d0a69bc49a6e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7b814993c5df4dffbaf9a130154aa42e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/4337b22286514d929b7740a27bbc39f2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/9ce4eb13c5f14266a551bddd26901e7c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2c348083c4f8465ebcdd62d37b2f5980, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/86236fc56f1c4e4491486e79b9f1e4ce, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c6a119d861cb422abff45e180d417dcf, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/f63079396fd447eb88aee6370fe5e044] to archive 2024-11-20T17:24:47,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:24:47,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/35f996dcb0774246a174b37bf3006a77 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/35f996dcb0774246a174b37bf3006a77 2024-11-20T17:24:47,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/8b42c94200cc442c8d208a1458b0957f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/8b42c94200cc442c8d208a1458b0957f 2024-11-20T17:24:47,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/41cb0279f5034b1fbddc9e78dea3942e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/41cb0279f5034b1fbddc9e78dea3942e 2024-11-20T17:24:47,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/1cbaa31bdafa49c3a214af353e4cb29b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/1cbaa31bdafa49c3a214af353e4cb29b 2024-11-20T17:24:47,424 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c1eec70ce7b149b4b4d54856dc98e480 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c1eec70ce7b149b4b4d54856dc98e480 2024-11-20T17:24:47,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/8505e4f338af448caa18e8b72e5870d3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/8505e4f338af448caa18e8b72e5870d3 2024-11-20T17:24:47,426 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/0e000fcbd53a488a8579f5876681e30c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/0e000fcbd53a488a8579f5876681e30c 2024-11-20T17:24:47,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/14451cf7d6a948ea874951526cdd99d7 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/14451cf7d6a948ea874951526cdd99d7 2024-11-20T17:24:47,428 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7eefeedcba5d486dabb56e9612439365 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7eefeedcba5d486dabb56e9612439365 2024-11-20T17:24:47,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/5ab7e4b084be4953b4c1100fc836867b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/5ab7e4b084be4953b4c1100fc836867b 2024-11-20T17:24:47,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/ad1e2ebc371c49e0af292510037500fb to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/ad1e2ebc371c49e0af292510037500fb 2024-11-20T17:24:47,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c2479c79ff6c4247a00b2c3c3d1fe096 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c2479c79ff6c4247a00b2c3c3d1fe096 2024-11-20T17:24:47,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/ed065afd4831452e9451d3a6987fbb91 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/ed065afd4831452e9451d3a6987fbb91 2024-11-20T17:24:47,433 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2ec031c2b15d422faf9ee7f1eabd022c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2ec031c2b15d422faf9ee7f1eabd022c 2024-11-20T17:24:47,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/bef7de201b30464a88ffa2c06b0ea387 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/bef7de201b30464a88ffa2c06b0ea387 2024-11-20T17:24:47,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c6d393fa528a416d96833df5163fb87e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c6d393fa528a416d96833df5163fb87e 2024-11-20T17:24:47,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/9dba84434c364406ac32986e5350efae to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/9dba84434c364406ac32986e5350efae 2024-11-20T17:24:47,438 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/1b2b0627e28a4d9aa673cc5f8c26c6c2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/1b2b0627e28a4d9aa673cc5f8c26c6c2 2024-11-20T17:24:47,439 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/468413d9cbc8434fa3c847b3fcd727bd to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/468413d9cbc8434fa3c847b3fcd727bd 2024-11-20T17:24:47,440 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2b27402dc7694dd48cabbbc5591f3519 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2b27402dc7694dd48cabbbc5591f3519 2024-11-20T17:24:47,441 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/88493592d3114c87a88145afa2ff6ede to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/88493592d3114c87a88145afa2ff6ede 2024-11-20T17:24:47,442 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e3fb7dd468604fad93769fd545250106 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e3fb7dd468604fad93769fd545250106 2024-11-20T17:24:47,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e607e37a0fe7434881e3988686d06aee to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e607e37a0fe7434881e3988686d06aee 2024-11-20T17:24:47,444 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/530984e7b59a4745a928867dfe3f4247 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/530984e7b59a4745a928867dfe3f4247 2024-11-20T17:24:47,445 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/4388cb4a990d4198ac1d65227bfd873c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/4388cb4a990d4198ac1d65227bfd873c 2024-11-20T17:24:47,446 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e101c8ab182845eabed3e3dc2bd09625 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/e101c8ab182845eabed3e3dc2bd09625 2024-11-20T17:24:47,447 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/063c35d6dd1f4271a608ec2467bde6e0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/063c35d6dd1f4271a608ec2467bde6e0 2024-11-20T17:24:47,448 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/6bb061651e054b38b6c6d0a69bc49a6e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/6bb061651e054b38b6c6d0a69bc49a6e 2024-11-20T17:24:47,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7b814993c5df4dffbaf9a130154aa42e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7b814993c5df4dffbaf9a130154aa42e 2024-11-20T17:24:47,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/4337b22286514d929b7740a27bbc39f2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/4337b22286514d929b7740a27bbc39f2 2024-11-20T17:24:47,452 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/9ce4eb13c5f14266a551bddd26901e7c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/9ce4eb13c5f14266a551bddd26901e7c 2024-11-20T17:24:47,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2c348083c4f8465ebcdd62d37b2f5980 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/2c348083c4f8465ebcdd62d37b2f5980 2024-11-20T17:24:47,454 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/86236fc56f1c4e4491486e79b9f1e4ce to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/86236fc56f1c4e4491486e79b9f1e4ce 2024-11-20T17:24:47,455 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c6a119d861cb422abff45e180d417dcf to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/c6a119d861cb422abff45e180d417dcf 2024-11-20T17:24:47,456 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/f63079396fd447eb88aee6370fe5e044 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/f63079396fd447eb88aee6370fe5e044 2024-11-20T17:24:47,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/5da896eac12142d6bd0d628f2b22afdc, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a7fe6e66ad134dd4b773bdeaeb1bfd3d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/bfbd54f1d73d4b9998c43771469c98ba, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ed11ecf56daa47f3af449ac7fb19b797, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7291404716754ae3848d96dddffa5895, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7536c55320ec432db3a953cb38c0c086, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a5ecc0ee825b4ebb8278152c405b81f6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/741a072ba027498a812679056e279012, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/9c49657710234b0681b647b9d7a91fb4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/96eecc685e694824a5ed96e1b7d2aa7f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ddfe821192904583b0a02c1041c2f7db, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7661e54223de49c99b0951ec4de092ee, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/451003116cb346568ed0deff2aeef532, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a01614e377e244a5ae8ecbe26b6da7b1, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/613e3be5892f43e7850614a6a2adda20, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/169bf1cc0548435b9f265dc5402dc554, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/269093d9515643b594b097a6a8de4b35, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/20ab1351e8ac4f54b61a013ae6d1ba1c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/6b5002b6d4e648c99d70eb0f401b9fe8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/1550d6a42213409e82c2f511f59dc9ee, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/fd5dee64f3cb478fbdec8a53c66641a3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/de21175dbb7547d2b576931f4af72c94, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/38eddf1958d04294ba8b345aede8126a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/4a80f10cd0f44a5ea9e3e2668d68f80a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/4b5fb0a2136d44249308f546ae5d918d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/25b437bedce74c91b315af64f4b9c558, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a910d9c7643f4a31b509fda5140ac1d1, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/282728b5d3fb4b8b82eef6026ab4c2e1, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/59e1325c0ff54acab6bf4bd59a30fc5a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/bb61c56304314889ba619e513016a7c0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/9bbf57abe56a4f33850658cb41057bd2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/daf0923166cb40fb93d2b3e1ad31afd8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/b3979dfbfe0d4a2287dfc737d0f2fdb8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/485e54fba8484600a355538af2e48d7a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ee1c67223a9d4ca2a937ab627f56f3d0] to archive 2024-11-20T17:24:47,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:24:47,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/5da896eac12142d6bd0d628f2b22afdc to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/5da896eac12142d6bd0d628f2b22afdc 2024-11-20T17:24:47,462 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a7fe6e66ad134dd4b773bdeaeb1bfd3d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a7fe6e66ad134dd4b773bdeaeb1bfd3d 2024-11-20T17:24:47,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/bfbd54f1d73d4b9998c43771469c98ba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/bfbd54f1d73d4b9998c43771469c98ba 2024-11-20T17:24:47,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ed11ecf56daa47f3af449ac7fb19b797 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ed11ecf56daa47f3af449ac7fb19b797 2024-11-20T17:24:47,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7291404716754ae3848d96dddffa5895 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7291404716754ae3848d96dddffa5895 2024-11-20T17:24:47,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7536c55320ec432db3a953cb38c0c086 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7536c55320ec432db3a953cb38c0c086 2024-11-20T17:24:47,469 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a5ecc0ee825b4ebb8278152c405b81f6 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a5ecc0ee825b4ebb8278152c405b81f6 2024-11-20T17:24:47,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/741a072ba027498a812679056e279012 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/741a072ba027498a812679056e279012 2024-11-20T17:24:47,472 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/9c49657710234b0681b647b9d7a91fb4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/9c49657710234b0681b647b9d7a91fb4 2024-11-20T17:24:47,473 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/96eecc685e694824a5ed96e1b7d2aa7f to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/96eecc685e694824a5ed96e1b7d2aa7f 2024-11-20T17:24:47,474 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ddfe821192904583b0a02c1041c2f7db to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ddfe821192904583b0a02c1041c2f7db 2024-11-20T17:24:47,475 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7661e54223de49c99b0951ec4de092ee to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/7661e54223de49c99b0951ec4de092ee 2024-11-20T17:24:47,476 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/451003116cb346568ed0deff2aeef532 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/451003116cb346568ed0deff2aeef532 2024-11-20T17:24:47,477 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a01614e377e244a5ae8ecbe26b6da7b1 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a01614e377e244a5ae8ecbe26b6da7b1 2024-11-20T17:24:47,478 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/613e3be5892f43e7850614a6a2adda20 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/613e3be5892f43e7850614a6a2adda20 2024-11-20T17:24:47,479 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/169bf1cc0548435b9f265dc5402dc554 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/169bf1cc0548435b9f265dc5402dc554 2024-11-20T17:24:47,480 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/269093d9515643b594b097a6a8de4b35 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/269093d9515643b594b097a6a8de4b35 2024-11-20T17:24:47,481 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/20ab1351e8ac4f54b61a013ae6d1ba1c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/20ab1351e8ac4f54b61a013ae6d1ba1c 2024-11-20T17:24:47,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/6b5002b6d4e648c99d70eb0f401b9fe8 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/6b5002b6d4e648c99d70eb0f401b9fe8 2024-11-20T17:24:47,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/1550d6a42213409e82c2f511f59dc9ee to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/1550d6a42213409e82c2f511f59dc9ee 2024-11-20T17:24:47,485 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/fd5dee64f3cb478fbdec8a53c66641a3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/fd5dee64f3cb478fbdec8a53c66641a3 2024-11-20T17:24:47,486 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/de21175dbb7547d2b576931f4af72c94 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/de21175dbb7547d2b576931f4af72c94 2024-11-20T17:24:47,487 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/38eddf1958d04294ba8b345aede8126a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/38eddf1958d04294ba8b345aede8126a 2024-11-20T17:24:47,488 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/4a80f10cd0f44a5ea9e3e2668d68f80a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/4a80f10cd0f44a5ea9e3e2668d68f80a 2024-11-20T17:24:47,489 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/4b5fb0a2136d44249308f546ae5d918d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/4b5fb0a2136d44249308f546ae5d918d 2024-11-20T17:24:47,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/25b437bedce74c91b315af64f4b9c558 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/25b437bedce74c91b315af64f4b9c558 2024-11-20T17:24:47,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a910d9c7643f4a31b509fda5140ac1d1 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/a910d9c7643f4a31b509fda5140ac1d1 2024-11-20T17:24:47,492 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/282728b5d3fb4b8b82eef6026ab4c2e1 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/282728b5d3fb4b8b82eef6026ab4c2e1 2024-11-20T17:24:47,493 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/59e1325c0ff54acab6bf4bd59a30fc5a to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/59e1325c0ff54acab6bf4bd59a30fc5a 2024-11-20T17:24:47,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/bb61c56304314889ba619e513016a7c0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/bb61c56304314889ba619e513016a7c0 2024-11-20T17:24:47,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/9bbf57abe56a4f33850658cb41057bd2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/9bbf57abe56a4f33850658cb41057bd2 2024-11-20T17:24:47,497 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/daf0923166cb40fb93d2b3e1ad31afd8 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/daf0923166cb40fb93d2b3e1ad31afd8 2024-11-20T17:24:47,498 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/b3979dfbfe0d4a2287dfc737d0f2fdb8 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/b3979dfbfe0d4a2287dfc737d0f2fdb8 2024-11-20T17:24:47,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/485e54fba8484600a355538af2e48d7a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/485e54fba8484600a355538af2e48d7a 2024-11-20T17:24:47,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ee1c67223a9d4ca2a937ab627f56f3d0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/ee1c67223a9d4ca2a937ab627f56f3d0 2024-11-20T17:24:47,505 DEBUG 
[RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/recovered.edits/518.seqid, newMaxSeqId=518, maxSeqId=1 2024-11-20T17:24:47,508 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc. 2024-11-20T17:24:47,508 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for 895da877845d8163116b8248e2bc3ffc: 2024-11-20T17:24:47,511 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed 895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:47,511 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=895da877845d8163116b8248e2bc3ffc, regionState=CLOSED 2024-11-20T17:24:47,514 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-20T17:24:47,514 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure 895da877845d8163116b8248e2bc3ffc, server=d514dc944523,44015,1732123455293 in 1.5830 sec 2024-11-20T17:24:47,516 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-20T17:24:47,516 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=895da877845d8163116b8248e2bc3ffc, UNASSIGN in 1.5880 sec 2024-11-20T17:24:47,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-20T17:24:47,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5940 sec 2024-11-20T17:24:47,519 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123487519"}]},"ts":"1732123487519"} 2024-11-20T17:24:47,520 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T17:24:47,522 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T17:24:47,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6110 sec 2024-11-20T17:24:48,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T17:24:48,022 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-20T17:24:48,025 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T17:24:48,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:48,030 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:48,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T17:24:48,031 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=38, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:48,035 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:48,039 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/recovered.edits] 2024-11-20T17:24:48,042 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/0eeb005b56274067aa4305674702a630 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/0eeb005b56274067aa4305674702a630 2024-11-20T17:24:48,044 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/0f56a04e9eaa41c9a1f95b752217fd2a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/0f56a04e9eaa41c9a1f95b752217fd2a 2024-11-20T17:24:48,045 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/19251797178d40cfb604f0cf5e7b386a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/A/19251797178d40cfb604f0cf5e7b386a 2024-11-20T17:24:48,048 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/6666c7ee813f4d55bead9d2a85f9d1c6 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/6666c7ee813f4d55bead9d2a85f9d1c6 2024-11-20T17:24:48,049 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7cb70f17d2cf4bb6af9fb371cbdfde14 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/7cb70f17d2cf4bb6af9fb371cbdfde14 2024-11-20T17:24:48,050 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/b052232ebd7a474ab59b3f4e8c611946 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/B/b052232ebd7a474ab59b3f4e8c611946 2024-11-20T17:24:48,053 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/66fedda0837f4f2e813e1bf40770b8a9 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/66fedda0837f4f2e813e1bf40770b8a9 2024-11-20T17:24:48,054 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/83e52aa32eb749dd96b38aab9e2c1735 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/83e52aa32eb749dd96b38aab9e2c1735 2024-11-20T17:24:48,055 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/93601844cfbb4b5686da203986127d1c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/C/93601844cfbb4b5686da203986127d1c 2024-11-20T17:24:48,057 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/recovered.edits/518.seqid to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc/recovered.edits/518.seqid 2024-11-20T17:24:48,058 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/895da877845d8163116b8248e2bc3ffc 2024-11-20T17:24:48,058 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T17:24:48,063 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=38, 
state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:48,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-20T17:24:48,070 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T17:24:48,100 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T17:24:48,102 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=38, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:48,102 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T17:24:48,102 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732123488102"}]},"ts":"9223372036854775807"} 2024-11-20T17:24:48,105 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T17:24:48,105 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 895da877845d8163116b8248e2bc3ffc, NAME => 'TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T17:24:48,105 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T17:24:48,105 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732123488105"}]},"ts":"9223372036854775807"} 2024-11-20T17:24:48,108 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T17:24:48,110 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=38, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:48,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 85 msec 2024-11-20T17:24:48,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-20T17:24:48,132 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-11-20T17:24:48,143 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=240 (was 219) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1888183143_22 at /127.0.0.1:55304 [Waiting for operation #134] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4e74fea5-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4e74fea5-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1819272385_22 at /127.0.0.1:56996 [Waiting for operation #296] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;d514dc944523:44015-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4e74fea5-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/d514dc944523:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1888183143_22 at /127.0.0.1:54884 [Waiting for operation #283] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: hconnection-0x4e74fea5-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/d514dc944523:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=458 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=280 (was 252) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6309 (was 6857) 2024-11-20T17:24:48,152 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=240, OpenFileDescriptor=458, MaxFileDescriptor=1048576, SystemLoadAverage=280, ProcessCount=11, AvailableMemoryMB=6309 2024-11-20T17:24:48,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T17:24:48,154 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:24:48,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:48,156 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:24:48,156 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:48,156 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 39 2024-11-20T17:24:48,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T17:24:48,157 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:24:48,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741953_1129 (size=963) 2024-11-20T17:24:48,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T17:24:48,358 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T17:24:48,360 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45426, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T17:24:48,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T17:24:48,565 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff 2024-11-20T17:24:48,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741954_1130 (size=53) 2024-11-20T17:24:48,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T17:24:48,972 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): 
Instantiated TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:24:48,972 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 51263f54befea8a9195f5613e0910232, disabling compactions & flushes 2024-11-20T17:24:48,972 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:48,972 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:48,972 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. after waiting 0 ms 2024-11-20T17:24:48,972 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:48,972 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:48,972 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:48,974 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:24:48,975 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732123488974"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123488974"}]},"ts":"1732123488974"} 2024-11-20T17:24:48,976 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
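
The CreateTableProcedure trace above (pid=39) corresponds to an ordinary client-side create of the 'TestAcidGuarantees' table with families A, B and C, VERSIONS => '1', and the 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' table metadata. A minimal sketch, assuming the standard HBase 2.x Java client API; only the table name, family names and the logged attributes are taken from the trace, the surrounding configuration (hbase-site.xml on the classpath) is an assumption, and the remaining attributes in the logged descriptor are simply left at their defaults here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableDescriptorBuilder builder =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                        // table-level metadata shown in the logged create request
                        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
                for (String family : new String[] { "A", "B", "C" }) {
                    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                        .build();
                    builder.setColumnFamily(cf);
                }
                // drives a CreateTableProcedure like pid=39 in the trace above
                admin.createTable(builder.build());
            }
        }
    }
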
2024-11-20T17:24:48,977 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:24:48,977 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123488977"}]},"ts":"1732123488977"} 2024-11-20T17:24:48,978 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T17:24:48,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=51263f54befea8a9195f5613e0910232, ASSIGN}] 2024-11-20T17:24:48,982 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=51263f54befea8a9195f5613e0910232, ASSIGN 2024-11-20T17:24:48,983 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=51263f54befea8a9195f5613e0910232, ASSIGN; state=OFFLINE, location=d514dc944523,44015,1732123455293; forceNewPlan=false, retain=false 2024-11-20T17:24:49,134 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=51263f54befea8a9195f5613e0910232, regionState=OPENING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:24:49,135 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; OpenRegionProcedure 51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:24:49,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T17:24:49,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:49,290 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
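
The earlier part of this trace, from the DisableTableProcedure (procId 34) through the DeleteTableProcedure (procId 38) and the HFileArchiver moves into the archive/ directory, is what an ordinary client-side disable-and-drop of the previous incarnation of the table looks like on the master and region server. A minimal sketch of that client side, again assuming the standard HBase 2.x Admin API; only the table name comes from the trace, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                if (admin.tableExists(table)) {
                    if (admin.isTableEnabled(table)) {
                        // master runs a DisableTableProcedure, closing and unassigning the region(s)
                        admin.disableTable(table);
                    }
                    // master runs a DeleteTableProcedure; store files are archived, not removed in place,
                    // which is why the trace shows HFileArchiver moving HFiles under .../archive/
                    admin.deleteTable(table);
                }
            }
        }
    }
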
2024-11-20T17:24:49,291 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7285): Opening region: {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:24:49,291 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:49,291 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:24:49,291 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7327): checking encryption for 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:49,291 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7330): checking classloading for 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:49,292 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:49,294 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:24:49,294 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51263f54befea8a9195f5613e0910232 columnFamilyName A 2024-11-20T17:24:49,294 DEBUG [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:49,294 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.HStore(327): Store=51263f54befea8a9195f5613e0910232/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:49,295 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:49,296 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:24:49,296 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51263f54befea8a9195f5613e0910232 columnFamilyName B 2024-11-20T17:24:49,297 DEBUG [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:49,297 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.HStore(327): Store=51263f54befea8a9195f5613e0910232/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:49,297 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:49,298 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:24:49,298 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51263f54befea8a9195f5613e0910232 columnFamilyName C 2024-11-20T17:24:49,298 DEBUG [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:49,299 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.HStore(327): Store=51263f54befea8a9195f5613e0910232/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:49,299 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:49,299 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232 2024-11-20T17:24:49,300 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232 2024-11-20T17:24:49,301 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:24:49,302 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1085): writing seq id for 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:49,304 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:24:49,305 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1102): Opened 51263f54befea8a9195f5613e0910232; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73712046, jitterRate=0.09839507937431335}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:24:49,305 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1001): Region open journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:49,306 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., pid=41, masterSystemTime=1732123489287 2024-11-20T17:24:49,307 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:49,307 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
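The entries above trace the CreateTableProcedure (pid=39) assigning and opening the new TestAcidGuarantees region; the entries that follow record the procedure finishing and the client seeing it complete. Purely as an illustrative sketch (not taken from this log; the class name, configuration and versions settings are assumptions), an equivalent client-side create through the HBase 2.x Admin API could look roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)   // VERSIONS => '1', matching the descriptor shown later in this log
                .build());
      }
      // createTable blocks until the CreateTableProcedure finishes, which is what
      // the "Operation: CREATE ... procId: 39 completed" entry below corresponds to.
      admin.createTable(table.build());
    }
  }
}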
2024-11-20T17:24:49,308 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=51263f54befea8a9195f5613e0910232, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:24:49,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-20T17:24:49,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; OpenRegionProcedure 51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 in 174 msec 2024-11-20T17:24:49,312 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-20T17:24:49,312 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=51263f54befea8a9195f5613e0910232, ASSIGN in 330 msec 2024-11-20T17:24:49,313 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:24:49,313 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123489313"}]},"ts":"1732123489313"} 2024-11-20T17:24:49,314 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T17:24:49,317 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:24:49,318 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1630 sec 2024-11-20T17:24:50,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T17:24:50,262 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-11-20T17:24:50,265 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7f3c14c0 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3242ee55 2024-11-20T17:24:50,268 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d3b0c59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:50,270 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:50,272 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40210, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:50,273 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:24:50,275 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45442, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:24:50,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T17:24:50,280 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:24:50,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=42, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T17:24:50,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741955_1131 (size=999) 2024-11-20T17:24:50,699 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-20T17:24:50,699 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-20T17:24:50,703 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:24:50,712 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=51263f54befea8a9195f5613e0910232, REOPEN/MOVE}] 2024-11-20T17:24:50,713 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=51263f54befea8a9195f5613e0910232, REOPEN/MOVE 2024-11-20T17:24:50,713 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=51263f54befea8a9195f5613e0910232, regionState=CLOSING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:24:50,715 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:24:50,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE; CloseRegionProcedure 51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:24:50,866 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:50,867 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(124): Close 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:50,867 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:24:50,867 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1681): Closing 51263f54befea8a9195f5613e0910232, disabling compactions & flushes 2024-11-20T17:24:50,867 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:50,867 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:50,867 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. after waiting 0 ms 2024-11-20T17:24:50,867 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:24:50,871 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T17:24:50,871 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:50,871 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1635): Region close journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:50,872 WARN [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegionServer(3786): Not adding moved region record: 51263f54befea8a9195f5613e0910232 to self. 2024-11-20T17:24:50,873 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(170): Closed 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:50,873 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=51263f54befea8a9195f5613e0910232, regionState=CLOSED 2024-11-20T17:24:50,876 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-20T17:24:50,876 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; CloseRegionProcedure 51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 in 160 msec 2024-11-20T17:24:50,876 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=51263f54befea8a9195f5613e0910232, REOPEN/MOVE; state=CLOSED, location=d514dc944523,44015,1732123455293; forceNewPlan=false, retain=true 2024-11-20T17:24:51,027 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=51263f54befea8a9195f5613e0910232, regionState=OPENING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=44, state=RUNNABLE; OpenRegionProcedure 51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:24:51,181 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:51,184 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:24:51,184 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7285): Opening region: {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:24:51,184 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:51,184 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:24:51,185 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7327): checking encryption for 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:51,185 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7330): checking classloading for 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:51,187 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:51,188 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:24:51,192 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51263f54befea8a9195f5613e0910232 columnFamilyName A 2024-11-20T17:24:51,194 DEBUG [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:51,195 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.HStore(327): Store=51263f54befea8a9195f5613e0910232/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:51,195 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:51,196 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:24:51,196 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51263f54befea8a9195f5613e0910232 columnFamilyName B 2024-11-20T17:24:51,196 DEBUG [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:51,197 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.HStore(327): Store=51263f54befea8a9195f5613e0910232/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:51,197 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:51,197 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:24:51,197 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 51263f54befea8a9195f5613e0910232 columnFamilyName C 2024-11-20T17:24:51,197 DEBUG [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:51,198 INFO [StoreOpener-51263f54befea8a9195f5613e0910232-1 {}] regionserver.HStore(327): Store=51263f54befea8a9195f5613e0910232/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:24:51,198 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:51,199 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232 2024-11-20T17:24:51,200 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232 2024-11-20T17:24:51,201 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:24:51,202 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1085): writing seq id for 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:51,203 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1102): Opened 51263f54befea8a9195f5613e0910232; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71330655, jitterRate=0.06290958821773529}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:24:51,204 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1001): Region open journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:51,205 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., pid=46, masterSystemTime=1732123491181 2024-11-20T17:24:51,206 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:51,206 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
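The close/reopen sequence above was driven by the ModifyTableProcedure (pid=42) recorded earlier, which switched column family 'A' to IS_MOB => 'true' with MOB_THRESHOLD => '4'. As an illustrative sketch only (the class and method names and the use of Admin here are assumptions, not taken from this log), the equivalent client-side change with the HBase 2.x API could look roughly like this:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  static void enableMob(Admin admin) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(table);
    ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
        .setMobEnabled(true)   // IS_MOB => 'true' in the new descriptor
        .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
        .build();
    // modifyTable stores the new descriptor and reopens the table's regions,
    // which is the CLOSE/OPEN sequence visible in the surrounding entries.
    admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build());
  }
}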
2024-11-20T17:24:51,207 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=51263f54befea8a9195f5613e0910232, regionState=OPEN, openSeqNum=5, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=44 2024-11-20T17:24:51,209 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=44, state=SUCCESS; OpenRegionProcedure 51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 in 179 msec 2024-11-20T17:24:51,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-20T17:24:51,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=51263f54befea8a9195f5613e0910232, REOPEN/MOVE in 497 msec 2024-11-20T17:24:51,213 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-20T17:24:51,213 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 509 msec 2024-11-20T17:24:51,216 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 931 msec 2024-11-20T17:24:51,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-11-20T17:24:51,224 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a0aa7d7 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2aa409d0 2024-11-20T17:24:51,229 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27600c58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:51,231 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0231f064 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53af6163 2024-11-20T17:24:51,234 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57449e06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:51,235 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x28c904d8 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15736fcc 2024-11-20T17:24:51,238 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35b51e5d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:51,240 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d6eb994 to 
127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32168855 2024-11-20T17:24:51,243 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c517130, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:51,244 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a63fed4 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40832d66 2024-11-20T17:24:51,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@776c0cb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:51,248 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x29dad7a8 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3ec46f90 2024-11-20T17:24:51,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@347ad9b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:51,252 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62c6fdab to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f63b68c 2024-11-20T17:24:51,254 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d36579b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:51,255 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x250a1de4 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@473f181f 2024-11-20T17:24:51,258 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@681a05ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:51,259 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49456175 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@768577a2 2024-11-20T17:24:51,262 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e0829fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:24:51,265 DEBUG 
[hconnection-0x3136dfc6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:51,265 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:51,266 DEBUG [hconnection-0x6c62da32-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:51,266 DEBUG [hconnection-0x6124f4e5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:51,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-20T17:24:51,267 DEBUG [hconnection-0x6297edbb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:51,267 DEBUG [hconnection-0x5e02d7a7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:51,267 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:51,267 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40212, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:51,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T17:24:51,268 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40214, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:51,268 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:51,268 DEBUG [hconnection-0x1fc73346-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:51,268 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:51,269 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:51,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:51,269 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40244, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:51,270 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40248, version=2.7.0-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:51,270 DEBUG [hconnection-0x724e4334-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:51,271 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:51,271 DEBUG [hconnection-0x1a30796a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:51,272 DEBUG [hconnection-0xcb664c7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:24:51,272 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40260, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:51,273 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:24:51,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:51,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:24:51,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:24:51,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:51,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:24:51,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:51,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:24:51,292 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:51,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120042dae3fe03b43beb7f08abc0f426823_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123491279/Put/seqid=0 2024-11-20T17:24:51,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741956_1132 (size=12154) 2024-11-20T17:24:51,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T17:24:51,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123551366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123551368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123551368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123551368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123551369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,421 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:51,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:51,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:51,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:51,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:51,422 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
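The RegionTooBusyException warnings above and below are the region server pushing back while the memstore is over its 512.0 K limit and the flush is still in progress; the HBase client ordinarily retries such calls on its own (governed by settings such as hbase.client.retries.number and hbase.client.pause). Purely as an illustrative sketch (table, row, family and value names are assumptions taken loosely from this log), a hand-rolled retry around a single put, catching the IOException that such pushback surfaces as, could look like this:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithRetry {
  // Retries a single put a few times with linear backoff when the region pushes back.
  static void putWithRetry(Connection conn, int maxAttempts) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          return;                              // write accepted
        } catch (IOException e) {
          // May be (or wrap) a RegionTooBusyException like the ones in this log.
          if (attempt >= maxAttempts) throw e;
          Thread.sleep(100L * attempt);        // simple linear backoff between attempts
        }
      }
    }
  }
}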
2024-11-20T17:24:51,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:51,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:51,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123551472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123551483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123551483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123551483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123551484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T17:24:51,578 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:51,579 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:51,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:51,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:51,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:51,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:51,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:51,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:51,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123551678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123551686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123551686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123551687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:51,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123551687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:51,732 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:51,733 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:51,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:51,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:51,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:51,733 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:51,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:51,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:51,741 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:51,750 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120042dae3fe03b43beb7f08abc0f426823_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120042dae3fe03b43beb7f08abc0f426823_51263f54befea8a9195f5613e0910232 2024-11-20T17:24:51,751 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/871e71c616584f02bafdeaa5c614c9c7, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:51,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/871e71c616584f02bafdeaa5c614c9c7 is 175, key is test_row_0/A:col10/1732123491279/Put/seqid=0 2024-11-20T17:24:51,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741957_1133 (size=30955) 2024-11-20T17:24:51,782 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/871e71c616584f02bafdeaa5c614c9c7 2024-11-20T17:24:51,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/4c27577e0f33486fa636eddead4f1ac0 is 50, key is test_row_0/B:col10/1732123491279/Put/seqid=0 2024-11-20T17:24:51,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741958_1134 (size=12001) 2024-11-20T17:24:51,830 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/4c27577e0f33486fa636eddead4f1ac0 2024-11-20T17:24:51,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/3c1d560d12c549dbb03b111efd4f3562 is 50, key is test_row_0/C:col10/1732123491279/Put/seqid=0 
2024-11-20T17:24:51,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T17:24:51,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741959_1135 (size=12001) 2024-11-20T17:24:51,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/3c1d560d12c549dbb03b111efd4f3562 2024-11-20T17:24:51,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/871e71c616584f02bafdeaa5c614c9c7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/871e71c616584f02bafdeaa5c614c9c7 2024-11-20T17:24:51,886 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:51,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:51,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:51,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:51,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:51,887 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:51,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:51,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:51,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/871e71c616584f02bafdeaa5c614c9c7, entries=150, sequenceid=17, filesize=30.2 K 2024-11-20T17:24:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/4c27577e0f33486fa636eddead4f1ac0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/4c27577e0f33486fa636eddead4f1ac0 2024-11-20T17:24:51,902 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/4c27577e0f33486fa636eddead4f1ac0, entries=150, sequenceid=17, filesize=11.7 K 2024-11-20T17:24:51,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/3c1d560d12c549dbb03b111efd4f3562 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/3c1d560d12c549dbb03b111efd4f3562 2024-11-20T17:24:51,909 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/3c1d560d12c549dbb03b111efd4f3562, entries=150, sequenceid=17, filesize=11.7 K 2024-11-20T17:24:51,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 51263f54befea8a9195f5613e0910232 in 622ms, sequenceid=17, compaction requested=false 2024-11-20T17:24:51,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:51,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:51,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:24:51,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:24:51,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:51,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:24:51,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:51,993 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:24:51,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:52,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112064ac161c81c24716b41f4260ddab96fa_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123491367/Put/seqid=0 2024-11-20T17:24:52,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123552008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123552008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123552010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123552010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123552014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741960_1136 (size=14594) 2024-11-20T17:24:52,025 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:52,031 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112064ac161c81c24716b41f4260ddab96fa_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112064ac161c81c24716b41f4260ddab96fa_51263f54befea8a9195f5613e0910232 2024-11-20T17:24:52,033 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/c100d5bab1114bdd8166489f6bf4dc00, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:52,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/c100d5bab1114bdd8166489f6bf4dc00 is 175, key is test_row_0/A:col10/1732123491367/Put/seqid=0 2024-11-20T17:24:52,039 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:52,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:52,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
as already flushing 2024-11-20T17:24:52,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741961_1137 (size=39549) 2024-11-20T17:24:52,051 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/c100d5bab1114bdd8166489f6bf4dc00 2024-11-20T17:24:52,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/4fff0ab6d10b41b4833f44e07cff126d is 50, key is test_row_0/B:col10/1732123491367/Put/seqid=0 2024-11-20T17:24:52,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741962_1138 (size=12001) 2024-11-20T17:24:52,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/4fff0ab6d10b41b4833f44e07cff126d 2024-11-20T17:24:52,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/7f3f21ae53fb460080391ddcceeedb59 is 50, key is test_row_0/C:col10/1732123491367/Put/seqid=0 2024-11-20T17:24:52,097 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741963_1139 (size=12001) 2024-11-20T17:24:52,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123552115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123552115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123552116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123552119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123552123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,192 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:52,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:52,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:52,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:52,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123552319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123552320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123552320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123552323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123552327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,346 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:52,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:52,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:52,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,347 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:52,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T17:24:52,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/7f3f21ae53fb460080391ddcceeedb59 2024-11-20T17:24:52,499 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:52,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:52,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:52,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,501 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:52,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/c100d5bab1114bdd8166489f6bf4dc00 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/c100d5bab1114bdd8166489f6bf4dc00 2024-11-20T17:24:52,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/c100d5bab1114bdd8166489f6bf4dc00, entries=200, sequenceid=41, filesize=38.6 K 2024-11-20T17:24:52,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/4fff0ab6d10b41b4833f44e07cff126d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/4fff0ab6d10b41b4833f44e07cff126d 2024-11-20T17:24:52,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/4fff0ab6d10b41b4833f44e07cff126d, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T17:24:52,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/7f3f21ae53fb460080391ddcceeedb59 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/7f3f21ae53fb460080391ddcceeedb59 2024-11-20T17:24:52,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/7f3f21ae53fb460080391ddcceeedb59, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T17:24:52,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 51263f54befea8a9195f5613e0910232 in 542ms, sequenceid=41, compaction requested=false 2024-11-20T17:24:52,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:52,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:52,628 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:24:52,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 
2024-11-20T17:24:52,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:52,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:24:52,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:52,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:24:52,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:52,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112004f4c4442e66413ea017c6200a25c0b3_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123492626/Put/seqid=0 2024-11-20T17:24:52,656 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:52,657 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:52,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:52,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,657 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:52,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123552653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741964_1140 (size=17034) 2024-11-20T17:24:52,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123552656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,660 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:52,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123552658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123552660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123552660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,667 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112004f4c4442e66413ea017c6200a25c0b3_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112004f4c4442e66413ea017c6200a25c0b3_51263f54befea8a9195f5613e0910232 2024-11-20T17:24:52,671 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/143765aacfce48f894405a37ac5d7183, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:52,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/143765aacfce48f894405a37ac5d7183 is 175, key is test_row_0/A:col10/1732123492626/Put/seqid=0 2024-11-20T17:24:52,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741965_1141 (size=48139) 2024-11-20T17:24:52,687 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=56, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/143765aacfce48f894405a37ac5d7183 2024-11-20T17:24:52,695 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/9fd868ddd4254575b1d966632b60bf0a is 50, key is test_row_0/B:col10/1732123492626/Put/seqid=0 2024-11-20T17:24:52,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741966_1142 (size=12001) 2024-11-20T17:24:52,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/9fd868ddd4254575b1d966632b60bf0a 2024-11-20T17:24:52,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/48a37ac8e34a429097abe54a5fabf864 is 50, key is test_row_0/C:col10/1732123492626/Put/seqid=0 2024-11-20T17:24:52,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741967_1143 (size=12001) 2024-11-20T17:24:52,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/48a37ac8e34a429097abe54a5fabf864 2024-11-20T17:24:52,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/143765aacfce48f894405a37ac5d7183 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/143765aacfce48f894405a37ac5d7183 2024-11-20T17:24:52,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/143765aacfce48f894405a37ac5d7183, entries=250, sequenceid=56, filesize=47.0 K 2024-11-20T17:24:52,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/9fd868ddd4254575b1d966632b60bf0a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/9fd868ddd4254575b1d966632b60bf0a 2024-11-20T17:24:52,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/9fd868ddd4254575b1d966632b60bf0a, entries=150, sequenceid=56, filesize=11.7 K 2024-11-20T17:24:52,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/48a37ac8e34a429097abe54a5fabf864 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/48a37ac8e34a429097abe54a5fabf864 2024-11-20T17:24:52,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123552759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123552761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123552762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/48a37ac8e34a429097abe54a5fabf864, entries=150, sequenceid=56, filesize=11.7 K 2024-11-20T17:24:52,765 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=134.18 KB/137400 for 51263f54befea8a9195f5613e0910232 in 137ms, sequenceid=56, compaction requested=true 2024-11-20T17:24:52,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:52,766 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:52,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:52,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:52,766 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:52,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:52,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:52,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:52,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:52,767 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 118643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:52,767 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/A is initiating minor compaction (all files) 2024-11-20T17:24:52,767 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/A in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,767 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:52,768 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/871e71c616584f02bafdeaa5c614c9c7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/c100d5bab1114bdd8166489f6bf4dc00, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/143765aacfce48f894405a37ac5d7183] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=115.9 K 2024-11-20T17:24:52,768 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/B is initiating minor compaction (all files) 2024-11-20T17:24:52,768 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/B in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,768 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,768 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/871e71c616584f02bafdeaa5c614c9c7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/c100d5bab1114bdd8166489f6bf4dc00, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/143765aacfce48f894405a37ac5d7183] 2024-11-20T17:24:52,768 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/4c27577e0f33486fa636eddead4f1ac0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/4fff0ab6d10b41b4833f44e07cff126d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/9fd868ddd4254575b1d966632b60bf0a] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=35.2 K 2024-11-20T17:24:52,769 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 871e71c616584f02bafdeaa5c614c9c7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732123491279 2024-11-20T17:24:52,769 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c27577e0f33486fa636eddead4f1ac0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732123491279 2024-11-20T17:24:52,770 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting c100d5bab1114bdd8166489f6bf4dc00, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732123491356 2024-11-20T17:24:52,770 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fff0ab6d10b41b4833f44e07cff126d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732123491365 2024-11-20T17:24:52,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:52,770 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 143765aacfce48f894405a37ac5d7183, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732123492000 2024-11-20T17:24:52,770 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fd868ddd4254575b1d966632b60bf0a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732123492011 2024-11-20T17:24:52,770 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:24:52,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:24:52,772 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:52,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:24:52,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:52,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:24:52,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:52,787 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#B#compaction#123 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:52,788 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/a4dcfe75a97044fb8eb43c355f12f326 is 50, key is test_row_0/B:col10/1732123492626/Put/seqid=0 2024-11-20T17:24:52,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123552795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123552795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112008b97323a64a45f597517b58030b7af1_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123492769/Put/seqid=0 2024-11-20T17:24:52,809 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:52,812 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:52,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:52,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
as already flushing 2024-11-20T17:24:52,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,813 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,825 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120e0c963036427447196c8fe610340792a_51263f54befea8a9195f5613e0910232 store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:52,835 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120e0c963036427447196c8fe610340792a_51263f54befea8a9195f5613e0910232, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:52,835 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e0c963036427447196c8fe610340792a_51263f54befea8a9195f5613e0910232 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:52,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741968_1144 (size=12104) 2024-11-20T17:24:52,855 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/a4dcfe75a97044fb8eb43c355f12f326 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/a4dcfe75a97044fb8eb43c355f12f326 2024-11-20T17:24:52,862 INFO 
[RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/B of 51263f54befea8a9195f5613e0910232 into a4dcfe75a97044fb8eb43c355f12f326(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:52,862 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:52,862 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/B, priority=13, startTime=1732123492766; duration=0sec 2024-11-20T17:24:52,862 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:52,862 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:B 2024-11-20T17:24:52,862 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:52,864 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:52,864 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/C is initiating minor compaction (all files) 2024-11-20T17:24:52,864 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/C in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:24:52,864 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/3c1d560d12c549dbb03b111efd4f3562, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/7f3f21ae53fb460080391ddcceeedb59, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/48a37ac8e34a429097abe54a5fabf864] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=35.2 K 2024-11-20T17:24:52,864 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c1d560d12c549dbb03b111efd4f3562, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732123491279 2024-11-20T17:24:52,865 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f3f21ae53fb460080391ddcceeedb59, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732123491365 2024-11-20T17:24:52,866 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 48a37ac8e34a429097abe54a5fabf864, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732123492011 2024-11-20T17:24:52,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741969_1145 (size=12154) 2024-11-20T17:24:52,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123552898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,901 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#C#compaction#126 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:52,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123552900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,902 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/68cec9e5e2294620ac659cf6df4a9ad4 is 50, key is test_row_0/C:col10/1732123492626/Put/seqid=0 2024-11-20T17:24:52,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741970_1146 (size=4469) 2024-11-20T17:24:52,908 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#A#compaction#125 average throughput is 0.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:52,910 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/cc242a97ad5f4547b323947efd24ce73 is 175, key is test_row_0/A:col10/1732123492626/Put/seqid=0 2024-11-20T17:24:52,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741972_1148 (size=31058) 2024-11-20T17:24:52,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741971_1147 (size=12104) 2024-11-20T17:24:52,947 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/cc242a97ad5f4547b323947efd24ce73 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/cc242a97ad5f4547b323947efd24ce73 2024-11-20T17:24:52,953 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/68cec9e5e2294620ac659cf6df4a9ad4 as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/68cec9e5e2294620ac659cf6df4a9ad4 2024-11-20T17:24:52,956 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/A of 51263f54befea8a9195f5613e0910232 into cc242a97ad5f4547b323947efd24ce73(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:52,956 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:52,956 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/A, priority=13, startTime=1732123492765; duration=0sec 2024-11-20T17:24:52,956 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:52,956 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:A 2024-11-20T17:24:52,964 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/C of 51263f54befea8a9195f5613e0910232 into 68cec9e5e2294620ac659cf6df4a9ad4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:52,964 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:52,964 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/C, priority=13, startTime=1732123492767; duration=0sec 2024-11-20T17:24:52,964 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:52,964 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:C 2024-11-20T17:24:52,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123552962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123552964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:52,966 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:52,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123552965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:52,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:52,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:52,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:52,968 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:52,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,054 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T17:24:53,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123553105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123553105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,120 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:53,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:53,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:53,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,121 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:53,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123553268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123553269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123553270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,273 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:53,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:53,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:24:53,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:53,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,274 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:53,281 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:53,290 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112008b97323a64a45f597517b58030b7af1_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112008b97323a64a45f597517b58030b7af1_51263f54befea8a9195f5613e0910232 2024-11-20T17:24:53,291 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/3928c71aa0bd44f581b5b51f26f28d6e, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:53,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/3928c71aa0bd44f581b5b51f26f28d6e is 175, key is test_row_0/A:col10/1732123492769/Put/seqid=0 2024-11-20T17:24:53,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741973_1149 (size=30955) 2024-11-20T17:24:53,297 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=80, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/3928c71aa0bd44f581b5b51f26f28d6e 2024-11-20T17:24:53,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/628215422a81439988d878ff74403f0a is 50, key is test_row_0/B:col10/1732123492769/Put/seqid=0 2024-11-20T17:24:53,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741974_1150 (size=12001) 2024-11-20T17:24:53,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/628215422a81439988d878ff74403f0a 2024-11-20T17:24:53,324 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/15e4ce9689c547839ff3d82508f2f2f3 is 50, key is test_row_0/C:col10/1732123492769/Put/seqid=0 2024-11-20T17:24:53,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741975_1151 (size=12001) 2024-11-20T17:24:53,330 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/15e4ce9689c547839ff3d82508f2f2f3 2024-11-20T17:24:53,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/3928c71aa0bd44f581b5b51f26f28d6e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3928c71aa0bd44f581b5b51f26f28d6e 2024-11-20T17:24:53,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3928c71aa0bd44f581b5b51f26f28d6e, entries=150, sequenceid=80, filesize=30.2 K 2024-11-20T17:24:53,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/628215422a81439988d878ff74403f0a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/628215422a81439988d878ff74403f0a 2024-11-20T17:24:53,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/628215422a81439988d878ff74403f0a, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T17:24:53,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/15e4ce9689c547839ff3d82508f2f2f3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/15e4ce9689c547839ff3d82508f2f2f3 2024-11-20T17:24:53,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T17:24:53,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/15e4ce9689c547839ff3d82508f2f2f3, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T17:24:53,378 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 51263f54befea8a9195f5613e0910232 in 608ms, sequenceid=80, compaction requested=false 2024-11-20T17:24:53,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:53,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:53,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): 
Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:24:53,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:24:53,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:53,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:24:53,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:53,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:24:53,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:53,421 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200aa4b6018ed440dc932ee1e21ceaea02_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123492794/Put/seqid=0 2024-11-20T17:24:53,426 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:53,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:53,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:53,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,428 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741976_1152 (size=12154) 2024-11-20T17:24:53,438 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:53,446 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200aa4b6018ed440dc932ee1e21ceaea02_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200aa4b6018ed440dc932ee1e21ceaea02_51263f54befea8a9195f5613e0910232 2024-11-20T17:24:53,448 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/0f4f47ee1f9f409296a9f8d80a6eb09c, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:53,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/0f4f47ee1f9f409296a9f8d80a6eb09c is 175, key is test_row_0/A:col10/1732123492794/Put/seqid=0 2024-11-20T17:24:53,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123553454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123553455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741977_1153 (size=30955) 2024-11-20T17:24:53,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123553559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123553560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,580 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:53,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:53,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:53,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:53,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,733 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:53,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:53,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:53,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,734 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123553762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123553765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123553770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123553771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:53,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123553776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:53,867 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=97, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/0f4f47ee1f9f409296a9f8d80a6eb09c 2024-11-20T17:24:53,877 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/d56726b756e4437997a5a7b60a1408fa is 50, key is test_row_0/B:col10/1732123492794/Put/seqid=0 2024-11-20T17:24:53,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741978_1154 (size=12001) 2024-11-20T17:24:53,886 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/d56726b756e4437997a5a7b60a1408fa 2024-11-20T17:24:53,887 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:53,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:53,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:53,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:53,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:53,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:53,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/da866ab17f0745d2a13b4f3470285af1 is 50, key is test_row_0/C:col10/1732123492794/Put/seqid=0 2024-11-20T17:24:53,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741979_1155 (size=12001) 2024-11-20T17:24:53,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/da866ab17f0745d2a13b4f3470285af1 2024-11-20T17:24:53,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/0f4f47ee1f9f409296a9f8d80a6eb09c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/0f4f47ee1f9f409296a9f8d80a6eb09c 2024-11-20T17:24:53,921 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/0f4f47ee1f9f409296a9f8d80a6eb09c, entries=150, sequenceid=97, filesize=30.2 K 2024-11-20T17:24:53,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/d56726b756e4437997a5a7b60a1408fa as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/d56726b756e4437997a5a7b60a1408fa 2024-11-20T17:24:53,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/d56726b756e4437997a5a7b60a1408fa, entries=150, sequenceid=97, filesize=11.7 K 2024-11-20T17:24:53,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/da866ab17f0745d2a13b4f3470285af1 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/da866ab17f0745d2a13b4f3470285af1 2024-11-20T17:24:53,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/da866ab17f0745d2a13b4f3470285af1, entries=150, sequenceid=97, filesize=11.7 K 2024-11-20T17:24:53,935 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 51263f54befea8a9195f5613e0910232 in 526ms, sequenceid=97, compaction 
requested=true 2024-11-20T17:24:53,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:53,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:53,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:53,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:53,935 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:53,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:53,935 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:53,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:53,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:53,936 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:53,936 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:53,936 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/A is initiating minor compaction (all files) 2024-11-20T17:24:53,936 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/B is initiating minor compaction (all files) 2024-11-20T17:24:53,937 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/A in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,937 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/B in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:24:53,937 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/cc242a97ad5f4547b323947efd24ce73, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3928c71aa0bd44f581b5b51f26f28d6e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/0f4f47ee1f9f409296a9f8d80a6eb09c] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=90.8 K 2024-11-20T17:24:53,937 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:53,937 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/cc242a97ad5f4547b323947efd24ce73, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3928c71aa0bd44f581b5b51f26f28d6e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/0f4f47ee1f9f409296a9f8d80a6eb09c] 2024-11-20T17:24:53,937 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/a4dcfe75a97044fb8eb43c355f12f326, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/628215422a81439988d878ff74403f0a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/d56726b756e4437997a5a7b60a1408fa] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=35.3 K 2024-11-20T17:24:53,937 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc242a97ad5f4547b323947efd24ce73, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732123492011 2024-11-20T17:24:53,937 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting a4dcfe75a97044fb8eb43c355f12f326, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732123492011 2024-11-20T17:24:53,938 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
3928c71aa0bd44f581b5b51f26f28d6e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732123492657 2024-11-20T17:24:53,938 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 628215422a81439988d878ff74403f0a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732123492657 2024-11-20T17:24:53,939 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting d56726b756e4437997a5a7b60a1408fa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732123492777 2024-11-20T17:24:53,939 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f4f47ee1f9f409296a9f8d80a6eb09c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732123492777 2024-11-20T17:24:53,947 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:53,954 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#B#compaction#133 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:53,955 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/fcdf7cc649ab4259b43ab11034a39abd is 50, key is test_row_0/B:col10/1732123492794/Put/seqid=0 2024-11-20T17:24:53,961 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120bdffc18f3f7d411bb8ec8f12517d39e6_51263f54befea8a9195f5613e0910232 store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:53,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741980_1156 (size=12207) 2024-11-20T17:24:53,985 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120bdffc18f3f7d411bb8ec8f12517d39e6_51263f54befea8a9195f5613e0910232, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:53,985 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bdffc18f3f7d411bb8ec8f12517d39e6_51263f54befea8a9195f5613e0910232 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:53,993 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/fcdf7cc649ab4259b43ab11034a39abd as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/fcdf7cc649ab4259b43ab11034a39abd 2024-11-20T17:24:54,001 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/B of 51263f54befea8a9195f5613e0910232 into fcdf7cc649ab4259b43ab11034a39abd(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:54,001 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:54,001 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/B, priority=13, startTime=1732123493935; duration=0sec 2024-11-20T17:24:54,001 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:54,001 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:B 2024-11-20T17:24:54,001 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:54,003 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:54,003 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/C is initiating minor compaction (all files) 2024-11-20T17:24:54,003 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/C in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:24:54,003 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/68cec9e5e2294620ac659cf6df4a9ad4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/15e4ce9689c547839ff3d82508f2f2f3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/da866ab17f0745d2a13b4f3470285af1] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=35.3 K 2024-11-20T17:24:54,004 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 68cec9e5e2294620ac659cf6df4a9ad4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732123492011 2024-11-20T17:24:54,004 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 15e4ce9689c547839ff3d82508f2f2f3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732123492657 2024-11-20T17:24:54,005 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting da866ab17f0745d2a13b4f3470285af1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732123492777 2024-11-20T17:24:54,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741981_1157 (size=4469) 2024-11-20T17:24:54,017 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#A#compaction#132 average throughput is 0.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:54,018 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/d8dab2181096478389242005107643c0 is 175, key is test_row_0/A:col10/1732123492794/Put/seqid=0 2024-11-20T17:24:54,027 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#C#compaction#134 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:54,027 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/ac484458eb1e43ccbeb227c54f367c18 is 50, key is test_row_0/C:col10/1732123492794/Put/seqid=0 2024-11-20T17:24:54,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741983_1159 (size=12207) 2024-11-20T17:24:54,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741982_1158 (size=31161) 2024-11-20T17:24:54,041 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:54,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T17:24:54,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:54,042 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T17:24:54,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:24:54,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:54,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:24:54,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:54,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:24:54,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:54,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112090c9efacc9124325920c57a80b3299c7_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123493449/Put/seqid=0 2024-11-20T17:24:54,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 
51263f54befea8a9195f5613e0910232 2024-11-20T17:24:54,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:54,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741984_1160 (size=12154) 2024-11-20T17:24:54,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:54,082 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112090c9efacc9124325920c57a80b3299c7_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112090c9efacc9124325920c57a80b3299c7_51263f54befea8a9195f5613e0910232 2024-11-20T17:24:54,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/45c5294dd0464ec69bb5fa62b63ad173, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:54,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:54,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123554086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:54,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/45c5294dd0464ec69bb5fa62b63ad173 is 175, key is test_row_0/A:col10/1732123493449/Put/seqid=0 2024-11-20T17:24:54,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:54,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123554087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:54,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741985_1161 (size=30955) 2024-11-20T17:24:54,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:54,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123554190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:54,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:54,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123554192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:54,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:54,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123554393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:54,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:54,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123554394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:54,443 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/ac484458eb1e43ccbeb227c54f367c18 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/ac484458eb1e43ccbeb227c54f367c18 2024-11-20T17:24:54,447 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/d8dab2181096478389242005107643c0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/d8dab2181096478389242005107643c0 2024-11-20T17:24:54,452 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/C of 51263f54befea8a9195f5613e0910232 into ac484458eb1e43ccbeb227c54f367c18(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:54,452 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:54,453 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/C, priority=13, startTime=1732123493935; duration=0sec 2024-11-20T17:24:54,453 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:54,453 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:C 2024-11-20T17:24:54,455 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/A of 51263f54befea8a9195f5613e0910232 into d8dab2181096478389242005107643c0(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:54,455 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:54,455 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/A, priority=13, startTime=1732123493935; duration=0sec 2024-11-20T17:24:54,455 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:54,455 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:A 2024-11-20T17:24:54,499 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/45c5294dd0464ec69bb5fa62b63ad173 2024-11-20T17:24:54,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/761b0500b2934750939ce6d70980bfe2 is 50, key is test_row_0/B:col10/1732123493449/Put/seqid=0 2024-11-20T17:24:54,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741986_1162 (size=12001) 2024-11-20T17:24:54,519 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=120 (bloomFilter=true), 
to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/761b0500b2934750939ce6d70980bfe2 2024-11-20T17:24:54,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/f72b10d431dc41708ed13eb6a10d5c2d is 50, key is test_row_0/C:col10/1732123493449/Put/seqid=0 2024-11-20T17:24:54,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741987_1163 (size=12001) 2024-11-20T17:24:54,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:54,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123554695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:54,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:54,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123554698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:54,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:54,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123554775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:54,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:54,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123554777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:54,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:54,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123554782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:54,934 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/f72b10d431dc41708ed13eb6a10d5c2d 2024-11-20T17:24:54,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/45c5294dd0464ec69bb5fa62b63ad173 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/45c5294dd0464ec69bb5fa62b63ad173 2024-11-20T17:24:54,948 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/45c5294dd0464ec69bb5fa62b63ad173, entries=150, sequenceid=120, filesize=30.2 K 2024-11-20T17:24:54,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/761b0500b2934750939ce6d70980bfe2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/761b0500b2934750939ce6d70980bfe2 2024-11-20T17:24:54,965 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/761b0500b2934750939ce6d70980bfe2, entries=150, sequenceid=120, filesize=11.7 K 2024-11-20T17:24:54,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/f72b10d431dc41708ed13eb6a10d5c2d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/f72b10d431dc41708ed13eb6a10d5c2d 2024-11-20T17:24:54,972 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/f72b10d431dc41708ed13eb6a10d5c2d, entries=150, sequenceid=120, filesize=11.7 K 2024-11-20T17:24:54,974 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 51263f54befea8a9195f5613e0910232 in 932ms, sequenceid=120, compaction requested=false 2024-11-20T17:24:54,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:54,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:54,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-20T17:24:54,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-20T17:24:54,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-20T17:24:54,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.7070 sec 2024-11-20T17:24:54,980 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 3.7140 sec 2024-11-20T17:24:55,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:55,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T17:24:55,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:24:55,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:55,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:24:55,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:55,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:24:55,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:55,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bcb519f37c9e47b6988346da737905a3_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123494086/Put/seqid=0 2024-11-20T17:24:55,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:55,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123555220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:55,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:55,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123555224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:55,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741988_1164 (size=12254) 2024-11-20T17:24:55,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:55,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123555326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:55,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:55,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123555328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:55,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T17:24:55,375 INFO [Thread-657 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-20T17:24:55,376 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:55,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-20T17:24:55,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:24:55,378 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:55,379 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:55,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:55,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:24:55,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:55,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123555529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:55,531 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:55,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:55,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:55,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:55,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:55,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:55,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:55,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:55,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:55,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123555532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:55,637 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:55,642 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bcb519f37c9e47b6988346da737905a3_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bcb519f37c9e47b6988346da737905a3_51263f54befea8a9195f5613e0910232 2024-11-20T17:24:55,644 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/9ccff51f5a644a038a447c3566d5d1ec, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:55,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/9ccff51f5a644a038a447c3566d5d1ec is 175, key is test_row_0/A:col10/1732123494086/Put/seqid=0 2024-11-20T17:24:55,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741989_1165 (size=31055) 2024-11-20T17:24:55,649 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=137, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/9ccff51f5a644a038a447c3566d5d1ec 2024-11-20T17:24:55,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/6570ee5c8bcc401397330aaab8829af2 is 50, key is test_row_0/B:col10/1732123494086/Put/seqid=0 2024-11-20T17:24:55,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741990_1166 
(size=12101) 2024-11-20T17:24:55,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:24:55,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/6570ee5c8bcc401397330aaab8829af2 2024-11-20T17:24:55,686 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:55,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:55,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:55,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:55,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:55,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:55,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:55,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:55,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/9071b03f6e794b7cb374e7a92423983a is 50, key is test_row_0/C:col10/1732123494086/Put/seqid=0 2024-11-20T17:24:55,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741991_1167 (size=12101) 2024-11-20T17:24:55,699 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/9071b03f6e794b7cb374e7a92423983a 2024-11-20T17:24:55,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/9ccff51f5a644a038a447c3566d5d1ec as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/9ccff51f5a644a038a447c3566d5d1ec 2024-11-20T17:24:55,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/9ccff51f5a644a038a447c3566d5d1ec, entries=150, sequenceid=137, filesize=30.3 K 2024-11-20T17:24:55,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/6570ee5c8bcc401397330aaab8829af2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/6570ee5c8bcc401397330aaab8829af2 2024-11-20T17:24:55,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/6570ee5c8bcc401397330aaab8829af2, entries=150, sequenceid=137, filesize=11.8 K 2024-11-20T17:24:55,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/9071b03f6e794b7cb374e7a92423983a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/9071b03f6e794b7cb374e7a92423983a 2024-11-20T17:24:55,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/9071b03f6e794b7cb374e7a92423983a, entries=150, sequenceid=137, filesize=11.8 K 2024-11-20T17:24:55,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 51263f54befea8a9195f5613e0910232 in 532ms, sequenceid=137, 
compaction requested=true 2024-11-20T17:24:55,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:55,731 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:24:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:55,732 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:24:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:24:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:55,733 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93171 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:55,733 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/A is initiating minor compaction (all files) 2024-11-20T17:24:55,733 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:55,733 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/A in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:55,733 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/B is initiating minor compaction (all files) 2024-11-20T17:24:55,733 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/B in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:24:55,733 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/d8dab2181096478389242005107643c0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/45c5294dd0464ec69bb5fa62b63ad173, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/9ccff51f5a644a038a447c3566d5d1ec] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=91.0 K 2024-11-20T17:24:55,733 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:55,733 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/fcdf7cc649ab4259b43ab11034a39abd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/761b0500b2934750939ce6d70980bfe2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/6570ee5c8bcc401397330aaab8829af2] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=35.5 K 2024-11-20T17:24:55,733 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/d8dab2181096478389242005107643c0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/45c5294dd0464ec69bb5fa62b63ad173, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/9ccff51f5a644a038a447c3566d5d1ec] 2024-11-20T17:24:55,734 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8dab2181096478389242005107643c0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732123492777 2024-11-20T17:24:55,734 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting fcdf7cc649ab4259b43ab11034a39abd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732123492777 2024-11-20T17:24:55,735 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45c5294dd0464ec69bb5fa62b63ad173, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732123493449 2024-11-20T17:24:55,736 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 761b0500b2934750939ce6d70980bfe2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732123493449 2024-11-20T17:24:55,736 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ccff51f5a644a038a447c3566d5d1ec, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732123494071 2024-11-20T17:24:55,737 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 6570ee5c8bcc401397330aaab8829af2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732123494071 2024-11-20T17:24:55,747 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:55,748 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#B#compaction#141 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:55,748 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/22cac85029c549a1bca145b8360f5768 is 50, key is test_row_0/B:col10/1732123494086/Put/seqid=0 2024-11-20T17:24:55,752 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120beddcde37d6e4de296c6d94cd852bfe3_51263f54befea8a9195f5613e0910232 store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:55,755 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120beddcde37d6e4de296c6d94cd852bfe3_51263f54befea8a9195f5613e0910232, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:55,755 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120beddcde37d6e4de296c6d94cd852bfe3_51263f54befea8a9195f5613e0910232 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:55,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741992_1168 (size=12409) 2024-11-20T17:24:55,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741993_1169 (size=4469) 2024-11-20T17:24:55,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:55,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T17:24:55,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:24:55,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:55,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:24:55,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:55,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:24:55,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:55,839 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:55,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:55,840 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:55,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:55,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:55,840 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:55,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:55,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:55,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120de83c6e4d7114e109a77488621bb24cc_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123495212/Put/seqid=0 2024-11-20T17:24:55,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:55,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123555854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:55,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:55,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741994_1170 (size=14794) 2024-11-20T17:24:55,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123555855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:55,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:55,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123555959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:55,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:55,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123555959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:55,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:24:55,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:55,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:55,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:55,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:55,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:55,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:55,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:55,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,148 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:56,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:56,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:56,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:56,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:56,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:56,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123556162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:56,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:56,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123556162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:56,169 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#A#compaction#142 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:56,169 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/a46b18bc2b3d458abaafe01fdc5d826c is 175, key is test_row_0/A:col10/1732123494086/Put/seqid=0 2024-11-20T17:24:56,173 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/22cac85029c549a1bca145b8360f5768 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/22cac85029c549a1bca145b8360f5768 2024-11-20T17:24:56,178 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/B of 51263f54befea8a9195f5613e0910232 into 22cac85029c549a1bca145b8360f5768(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:24:56,179 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:56,179 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/B, priority=13, startTime=1732123495732; duration=0sec 2024-11-20T17:24:56,179 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:24:56,179 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:B 2024-11-20T17:24:56,179 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:24:56,181 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:24:56,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741995_1171 (size=31363) 2024-11-20T17:24:56,181 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/C is initiating minor compaction (all files) 2024-11-20T17:24:56,181 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/C in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:24:56,181 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/ac484458eb1e43ccbeb227c54f367c18, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/f72b10d431dc41708ed13eb6a10d5c2d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/9071b03f6e794b7cb374e7a92423983a] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=35.5 K 2024-11-20T17:24:56,182 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting ac484458eb1e43ccbeb227c54f367c18, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1732123492777 2024-11-20T17:24:56,182 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting f72b10d431dc41708ed13eb6a10d5c2d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732123493449 2024-11-20T17:24:56,182 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 9071b03f6e794b7cb374e7a92423983a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732123494071 2024-11-20T17:24:56,187 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/a46b18bc2b3d458abaafe01fdc5d826c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a46b18bc2b3d458abaafe01fdc5d826c 2024-11-20T17:24:56,190 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#C#compaction#144 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:24:56,191 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/1202c0e412d44725b92e4a8b772711a3 is 50, key is test_row_0/C:col10/1732123494086/Put/seqid=0 2024-11-20T17:24:56,193 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/A of 51263f54befea8a9195f5613e0910232 into a46b18bc2b3d458abaafe01fdc5d826c(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:56,193 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:56,193 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/A, priority=13, startTime=1732123495731; duration=0sec 2024-11-20T17:24:56,193 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:56,193 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:A 2024-11-20T17:24:56,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741996_1172 (size=12409) 2024-11-20T17:24:56,258 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:56,263 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120de83c6e4d7114e109a77488621bb24cc_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120de83c6e4d7114e109a77488621bb24cc_51263f54befea8a9195f5613e0910232 2024-11-20T17:24:56,264 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/49c6733d93d842279e1becdb51661282, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:56,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/49c6733d93d842279e1becdb51661282 is 175, key is test_row_0/A:col10/1732123495212/Put/seqid=0 2024-11-20T17:24:56,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741997_1173 (size=39749) 2024-11-20T17:24:56,301 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:56,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:56,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:24:56,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:56,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:56,302 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:56,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,453 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:56,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:56,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:56,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
as already flushing 2024-11-20T17:24:56,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:56,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:56,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123556465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:56,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:56,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123556466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:56,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:24:56,603 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/1202c0e412d44725b92e4a8b772711a3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/1202c0e412d44725b92e4a8b772711a3 2024-11-20T17:24:56,606 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:56,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:56,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:56,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:56,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:56,607 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,610 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/C of 51263f54befea8a9195f5613e0910232 into 1202c0e412d44725b92e4a8b772711a3(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:24:56,610 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:56,610 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/C, priority=13, startTime=1732123495732; duration=0sec 2024-11-20T17:24:56,610 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:24:56,610 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:C 2024-11-20T17:24:56,670 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/49c6733d93d842279e1becdb51661282 2024-11-20T17:24:56,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/225e51ad6fe041b5be7d423d6b996144 is 50, key is test_row_0/B:col10/1732123495212/Put/seqid=0 2024-11-20T17:24:56,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741998_1174 (size=12151) 2024-11-20T17:24:56,760 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:56,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:56,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:56,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:56,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:56,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:56,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123556783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:56,785 DEBUG [Thread-647 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4129 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., hostname=d514dc944523,44015,1732123455293, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:24:56,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:56,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123556789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:56,790 DEBUG [Thread-649 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., hostname=d514dc944523,44015,1732123455293, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:24:56,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:56,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123556789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:56,791 DEBUG [Thread-653 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., hostname=d514dc944523,44015,1732123455293, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:24:56,913 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:56,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:56,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:56,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:56,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:56,914 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:56,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:56,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123556969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:56,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:56,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123556972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:57,066 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:57,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:57,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:57,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:57,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:57,067 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:57,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:57,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:57,092 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/225e51ad6fe041b5be7d423d6b996144 2024-11-20T17:24:57,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/b3b606df97ab42e39b1d7d86c9bf22e4 is 50, key is test_row_0/C:col10/1732123495212/Put/seqid=0 2024-11-20T17:24:57,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741999_1175 (size=12151) 2024-11-20T17:24:57,219 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:57,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:57,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:57,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:57,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:57,221 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:57,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:57,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:57,373 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:57,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:57,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:57,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:57,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:57,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:57,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:57,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:24:57,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:24:57,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/b3b606df97ab42e39b1d7d86c9bf22e4 2024-11-20T17:24:57,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/49c6733d93d842279e1becdb51661282 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/49c6733d93d842279e1becdb51661282 2024-11-20T17:24:57,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/49c6733d93d842279e1becdb51661282, entries=200, sequenceid=159, filesize=38.8 K 2024-11-20T17:24:57,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/225e51ad6fe041b5be7d423d6b996144 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/225e51ad6fe041b5be7d423d6b996144 2024-11-20T17:24:57,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/225e51ad6fe041b5be7d423d6b996144, entries=150, sequenceid=159, filesize=11.9 K 2024-11-20T17:24:57,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/b3b606df97ab42e39b1d7d86c9bf22e4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b3b606df97ab42e39b1d7d86c9bf22e4 2024-11-20T17:24:57,526 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:57,526 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:57,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:57,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
as already flushing 2024-11-20T17:24:57,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:57,527 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:57,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:57,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:57,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b3b606df97ab42e39b1d7d86c9bf22e4, entries=150, sequenceid=159, filesize=11.9 K 2024-11-20T17:24:57,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 51263f54befea8a9195f5613e0910232 in 1698ms, sequenceid=159, compaction requested=false 2024-11-20T17:24:57,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:57,679 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:57,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T17:24:57,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:24:57,681 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:24:57,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:24:57,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:57,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:24:57,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:57,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:24:57,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:57,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120af4e3a4f6c8f4162b36ec636c2bb102f_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123495845/Put/seqid=0 2024-11-20T17:24:57,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742000_1176 (size=12304) 2024-11-20T17:24:57,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:57,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:58,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:58,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123558026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:58,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123558028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:58,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:58,099 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120af4e3a4f6c8f4162b36ec636c2bb102f_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120af4e3a4f6c8f4162b36ec636c2bb102f_51263f54befea8a9195f5613e0910232 2024-11-20T17:24:58,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/a9bb9212e59746d1b72b8dffef7b28cf, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:58,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/a9bb9212e59746d1b72b8dffef7b28cf is 175, key is test_row_0/A:col10/1732123495845/Put/seqid=0 2024-11-20T17:24:58,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742001_1177 (size=31105) 2024-11-20T17:24:58,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:58,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123558129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:58,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:58,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123558131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:58,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:58,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:58,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123558333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:58,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123558335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:58,506 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=176, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/a9bb9212e59746d1b72b8dffef7b28cf 2024-11-20T17:24:58,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/8a09f1bcd8204dc69e618870a30d1bd6 is 50, key is test_row_0/B:col10/1732123495845/Put/seqid=0 2024-11-20T17:24:58,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742002_1178 (size=12151) 2024-11-20T17:24:58,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123558636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:58,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:58,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123558639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:58,920 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/8a09f1bcd8204dc69e618870a30d1bd6 2024-11-20T17:24:58,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/6b8753b60e0c4c7ea619e21ecdd729b2 is 50, key is test_row_0/C:col10/1732123495845/Put/seqid=0 2024-11-20T17:24:58,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742003_1179 (size=12151) 2024-11-20T17:24:58,937 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/6b8753b60e0c4c7ea619e21ecdd729b2 2024-11-20T17:24:58,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/a9bb9212e59746d1b72b8dffef7b28cf as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a9bb9212e59746d1b72b8dffef7b28cf 2024-11-20T17:24:58,946 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a9bb9212e59746d1b72b8dffef7b28cf, entries=150, sequenceid=176, filesize=30.4 K 2024-11-20T17:24:58,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/8a09f1bcd8204dc69e618870a30d1bd6 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/8a09f1bcd8204dc69e618870a30d1bd6 2024-11-20T17:24:58,951 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/8a09f1bcd8204dc69e618870a30d1bd6, entries=150, sequenceid=176, filesize=11.9 K 2024-11-20T17:24:58,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/6b8753b60e0c4c7ea619e21ecdd729b2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/6b8753b60e0c4c7ea619e21ecdd729b2 2024-11-20T17:24:58,956 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/6b8753b60e0c4c7ea619e21ecdd729b2, entries=150, sequenceid=176, filesize=11.9 K 2024-11-20T17:24:58,957 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 51263f54befea8a9195f5613e0910232 in 1277ms, sequenceid=176, compaction requested=true 2024-11-20T17:24:58,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:24:58,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:24:58,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-20T17:24:58,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-20T17:24:58,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-20T17:24:58,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5790 sec 2024-11-20T17:24:58,961 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 3.5840 sec 2024-11-20T17:24:59,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:24:59,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:24:59,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:24:59,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:59,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:24:59,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:59,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:24:59,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:24:59,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f40898354ab043ddbcd935d8fbeac0fa_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123498025/Put/seqid=0 2024-11-20T17:24:59,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742004_1180 (size=12304) 2024-11-20T17:24:59,158 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:24:59,163 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f40898354ab043ddbcd935d8fbeac0fa_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f40898354ab043ddbcd935d8fbeac0fa_51263f54befea8a9195f5613e0910232 2024-11-20T17:24:59,164 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/71cb6fdefaca477a8d19965000dab144, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:24:59,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/71cb6fdefaca477a8d19965000dab144 is 175, key is test_row_0/A:col10/1732123498025/Put/seqid=0 2024-11-20T17:24:59,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:59,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123559161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:59,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:59,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123559162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:59,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742005_1181 (size=31105) 2024-11-20T17:24:59,189 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/71cb6fdefaca477a8d19965000dab144 2024-11-20T17:24:59,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/3e259d8286f1458fa5f92390a347f7e8 is 50, key is test_row_0/B:col10/1732123498025/Put/seqid=0 2024-11-20T17:24:59,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742006_1182 (size=12151) 2024-11-20T17:24:59,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:59,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123559267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:59,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:59,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123559267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:59,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:59,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123559470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:59,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:59,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123559470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:59,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T17:24:59,483 INFO [Thread-657 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-20T17:24:59,485 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:24:59,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-20T17:24:59,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T17:24:59,486 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:24:59,487 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:24:59,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:24:59,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T17:24:59,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/3e259d8286f1458fa5f92390a347f7e8 2024-11-20T17:24:59,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/24e592c8ac21458eb16047468ca93d41 is 50, key is test_row_0/C:col10/1732123498025/Put/seqid=0 2024-11-20T17:24:59,615 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742007_1183 (size=12151) 2024-11-20T17:24:59,639 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:59,639 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T17:24:59,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:59,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:59,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:59,640 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:59,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:59,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:59,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123559774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:59,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:24:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123559776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:24:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T17:24:59,792 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:59,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T17:24:59,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:59,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:59,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:59,793 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
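Note: the repeated RegionTooBusyException ("Over memstore limit=512.0 K") above means writes are being rejected because the region's memstore has grown past its blocking size, which HBase derives from the flush size multiplied by the block multiplier. The values this test actually uses are not visible in this excerpt; the sketch below only illustrates the two standard configuration keys involved, with values chosen so the product matches the 512 K limit seen in the log (an assumption, not the test's real configuration).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches this many bytes
    // (the default is 128 MB; a tiny value like this is typical only in tests).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Block new updates once the memstore reaches flush.size * multiplier;
    // with these illustrative values the blocking limit is 512 KB, matching
    // the "Over memstore limit=512.0 K" messages above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}
```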
2024-11-20T17:24:59,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:59,945 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:24:59,946 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T17:24:59,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:59,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:24:59,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:24:59,946 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:59,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:24:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
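Note: pid=52 above is a master-driven flush (FlushTableProcedure with a FlushRegionProcedure child) that keeps failing with "Unable to complete flush ... as already flushing" because MemStoreFlusher.0 is still writing the previous snapshot; the master logs "Remote procedure failed, pid=52" and redispatches the callable, which is the retry loop visible in these records. A flush of this kind can be requested from a client through the Admin API; the sketch below is a minimal illustration under that assumption, with the table name taken from the log and error handling simplified.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; on the master this
      // appears as a FlushTableProcedure with one FlushRegionProcedure child
      // per region, like pid=51/pid=52 in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

If a region is already flushing when the callable arrives, the region server rejects it and the master retries, which is exactly the repeated ERROR / "Remote procedure failed" sequence recorded above.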
2024-11-20T17:25:00,016 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/24e592c8ac21458eb16047468ca93d41 2024-11-20T17:25:00,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/71cb6fdefaca477a8d19965000dab144 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/71cb6fdefaca477a8d19965000dab144 2024-11-20T17:25:00,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/71cb6fdefaca477a8d19965000dab144, entries=150, sequenceid=199, filesize=30.4 K 2024-11-20T17:25:00,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/3e259d8286f1458fa5f92390a347f7e8 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/3e259d8286f1458fa5f92390a347f7e8 2024-11-20T17:25:00,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/3e259d8286f1458fa5f92390a347f7e8, entries=150, sequenceid=199, filesize=11.9 K 2024-11-20T17:25:00,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/24e592c8ac21458eb16047468ca93d41 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/24e592c8ac21458eb16047468ca93d41 2024-11-20T17:25:00,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/24e592c8ac21458eb16047468ca93d41, entries=150, sequenceid=199, filesize=11.9 K 2024-11-20T17:25:00,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 51263f54befea8a9195f5613e0910232 in 893ms, sequenceid=199, compaction requested=true 2024-11-20T17:25:00,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:00,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:25:00,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:00,037 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:25:00,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:25:00,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:00,037 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:25:00,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:25:00,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:00,039 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:25:00,039 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133322 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:25:00,040 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/B is initiating minor compaction (all files) 2024-11-20T17:25:00,040 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/A is initiating minor compaction (all files) 2024-11-20T17:25:00,040 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/B in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:00,040 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/A in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
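Note: once the flush finishes, stores A, B, and C each hold four HFiles, so the flusher queues compaction requests and the ExploringCompactionPolicy selects all four eligible files for a minor compaction, as logged above. Compactions can also be requested and observed explicitly through the Admin API; the sketch below is illustrative only — the choice of family A, the polling loop, and the one-second interval are assumptions, not part of this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Request a minor compaction of family A, the store whose four files
      // (totalSize=130.2 K in the log above) were selected for compaction.
      admin.compact(table, Bytes.toBytes("A"));
      // Poll until the servers report no compaction in progress for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
    }
  }
}
```

The PressureAwareThroughputController lines that follow show the resulting compactions being throttled against the 50.00 MB/second limit reported in the log.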
2024-11-20T17:25:00,040 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/22cac85029c549a1bca145b8360f5768, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/225e51ad6fe041b5be7d423d6b996144, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/8a09f1bcd8204dc69e618870a30d1bd6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/3e259d8286f1458fa5f92390a347f7e8] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=47.7 K 2024-11-20T17:25:00,040 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a46b18bc2b3d458abaafe01fdc5d826c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/49c6733d93d842279e1becdb51661282, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a9bb9212e59746d1b72b8dffef7b28cf, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/71cb6fdefaca477a8d19965000dab144] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=130.2 K 2024-11-20T17:25:00,040 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:00,040 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a46b18bc2b3d458abaafe01fdc5d826c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/49c6733d93d842279e1becdb51661282, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a9bb9212e59746d1b72b8dffef7b28cf, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/71cb6fdefaca477a8d19965000dab144] 2024-11-20T17:25:00,040 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 22cac85029c549a1bca145b8360f5768, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732123494071 2024-11-20T17:25:00,041 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting a46b18bc2b3d458abaafe01fdc5d826c, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732123494071 2024-11-20T17:25:00,041 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 225e51ad6fe041b5be7d423d6b996144, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732123495212 2024-11-20T17:25:00,041 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49c6733d93d842279e1becdb51661282, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732123495212 2024-11-20T17:25:00,041 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a09f1bcd8204dc69e618870a30d1bd6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732123495845 2024-11-20T17:25:00,042 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9bb9212e59746d1b72b8dffef7b28cf, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732123495845 2024-11-20T17:25:00,042 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e259d8286f1458fa5f92390a347f7e8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732123498024 2024-11-20T17:25:00,042 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71cb6fdefaca477a8d19965000dab144, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732123498024 2024-11-20T17:25:00,051 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#B#compaction#153 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:00,052 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/7665bb49ce554ef6891aaf587658a8a3 is 50, key is test_row_0/B:col10/1732123498025/Put/seqid=0 2024-11-20T17:25:00,054 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:00,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742008_1184 (size=12595) 2024-11-20T17:25:00,060 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120e74022a181e744a7a7755f8dd249ffad_51263f54befea8a9195f5613e0910232 store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:00,063 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120e74022a181e744a7a7755f8dd249ffad_51263f54befea8a9195f5613e0910232, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:00,063 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e74022a181e744a7a7755f8dd249ffad_51263f54befea8a9195f5613e0910232 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:00,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742009_1185 (size=4469) 2024-11-20T17:25:00,069 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#A#compaction#154 average throughput is 1.75 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:00,069 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/3f92cac958dc45d3874bb25072ff58e4 is 175, key is test_row_0/A:col10/1732123498025/Put/seqid=0 2024-11-20T17:25:00,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742010_1186 (size=31549) 2024-11-20T17:25:00,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T17:25:00,098 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:00,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T17:25:00,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:00,099 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:25:00,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:25:00,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:00,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:25:00,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:00,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:25:00,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:00,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a4c8bd1c098948a19079277fdbd33206_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123499153/Put/seqid=0 2024-11-20T17:25:00,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742011_1187 
(size=12304) 2024-11-20T17:25:00,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:25:00,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:00,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:00,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123560308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:00,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:00,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123560310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:00,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:00,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123560412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:00,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:00,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123560414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:00,464 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/7665bb49ce554ef6891aaf587658a8a3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/7665bb49ce554ef6891aaf587658a8a3 2024-11-20T17:25:00,470 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 51263f54befea8a9195f5613e0910232/B of 51263f54befea8a9195f5613e0910232 into 7665bb49ce554ef6891aaf587658a8a3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:00,470 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:00,470 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/B, priority=12, startTime=1732123500037; duration=0sec 2024-11-20T17:25:00,470 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:00,470 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:B 2024-11-20T17:25:00,470 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:25:00,471 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:25:00,471 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/C is initiating minor compaction (all files) 2024-11-20T17:25:00,471 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/C in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:00,472 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/1202c0e412d44725b92e4a8b772711a3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b3b606df97ab42e39b1d7d86c9bf22e4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/6b8753b60e0c4c7ea619e21ecdd729b2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/24e592c8ac21458eb16047468ca93d41] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=47.7 K 2024-11-20T17:25:00,472 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 1202c0e412d44725b92e4a8b772711a3, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732123494071 2024-11-20T17:25:00,473 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting b3b606df97ab42e39b1d7d86c9bf22e4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732123495212 2024-11-20T17:25:00,473 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b8753b60e0c4c7ea619e21ecdd729b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=176, earliestPutTs=1732123495845 2024-11-20T17:25:00,473 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 24e592c8ac21458eb16047468ca93d41, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732123498024 2024-11-20T17:25:00,478 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/3f92cac958dc45d3874bb25072ff58e4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3f92cac958dc45d3874bb25072ff58e4 2024-11-20T17:25:00,483 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#C#compaction#156 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:00,484 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 51263f54befea8a9195f5613e0910232/A of 51263f54befea8a9195f5613e0910232 into 3f92cac958dc45d3874bb25072ff58e4(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:00,484 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:00,484 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/A, priority=12, startTime=1732123500037; duration=0sec 2024-11-20T17:25:00,484 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:00,484 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:A 2024-11-20T17:25:00,484 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/1f3378f5a73b4c6983ca2d7a51aa4a7c is 50, key is test_row_0/C:col10/1732123498025/Put/seqid=0 2024-11-20T17:25:00,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742012_1188 (size=12595) 2024-11-20T17:25:00,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:00,516 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a4c8bd1c098948a19079277fdbd33206_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a4c8bd1c098948a19079277fdbd33206_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:00,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/b12acf2e3ddc443cbbe0514cc92bc243, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:00,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/b12acf2e3ddc443cbbe0514cc92bc243 is 175, key is test_row_0/A:col10/1732123499153/Put/seqid=0 2024-11-20T17:25:00,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742013_1189 (size=31105) 2024-11-20T17:25:00,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T17:25:00,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:00,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123560616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:00,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:00,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123560616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:00,682 INFO [master/d514dc944523:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T17:25:00,682 INFO [master/d514dc944523:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T17:25:00,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:00,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123560796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:00,798 DEBUG [Thread-647 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., hostname=d514dc944523,44015,1732123455293, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:00,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:00,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123560809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:00,812 DEBUG [Thread-649 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., hostname=d514dc944523,44015,1732123455293, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:00,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:00,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123560812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:00,813 DEBUG [Thread-653 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8160 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., hostname=d514dc944523,44015,1732123455293, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:00,894 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/1f3378f5a73b4c6983ca2d7a51aa4a7c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/1f3378f5a73b4c6983ca2d7a51aa4a7c 2024-11-20T17:25:00,899 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 51263f54befea8a9195f5613e0910232/C of 51263f54befea8a9195f5613e0910232 into 1f3378f5a73b4c6983ca2d7a51aa4a7c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:00,899 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:00,899 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/C, priority=12, startTime=1732123500037; duration=0sec 2024-11-20T17:25:00,899 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:00,900 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:C 2024-11-20T17:25:00,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:00,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123560918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:00,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:00,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123560919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:00,924 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=212, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/b12acf2e3ddc443cbbe0514cc92bc243 2024-11-20T17:25:00,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/ae77d136da7e4b86ac125cf748fbdaee is 50, key is test_row_0/B:col10/1732123499153/Put/seqid=0 2024-11-20T17:25:00,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742014_1190 (size=12151) 2024-11-20T17:25:01,337 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/ae77d136da7e4b86ac125cf748fbdaee 2024-11-20T17:25:01,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/b48be762190e4119acaf509be1c7c72c is 50, key is test_row_0/C:col10/1732123499153/Put/seqid=0 2024-11-20T17:25:01,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742015_1191 (size=12151) 2024-11-20T17:25:01,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:01,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123561423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:01,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:01,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123561423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:01,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T17:25:01,752 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/b48be762190e4119acaf509be1c7c72c 2024-11-20T17:25:01,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/b12acf2e3ddc443cbbe0514cc92bc243 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b12acf2e3ddc443cbbe0514cc92bc243 2024-11-20T17:25:01,762 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b12acf2e3ddc443cbbe0514cc92bc243, entries=150, sequenceid=212, filesize=30.4 K 2024-11-20T17:25:01,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/ae77d136da7e4b86ac125cf748fbdaee as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/ae77d136da7e4b86ac125cf748fbdaee 2024-11-20T17:25:01,768 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/ae77d136da7e4b86ac125cf748fbdaee, entries=150, sequenceid=212, filesize=11.9 K 2024-11-20T17:25:01,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/b48be762190e4119acaf509be1c7c72c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b48be762190e4119acaf509be1c7c72c 2024-11-20T17:25:01,773 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b48be762190e4119acaf509be1c7c72c, entries=150, sequenceid=212, filesize=11.9 K 2024-11-20T17:25:01,774 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 51263f54befea8a9195f5613e0910232 in 1675ms, sequenceid=212, compaction requested=false 2024-11-20T17:25:01,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:01,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:01,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-20T17:25:01,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-20T17:25:01,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-20T17:25:01,778 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2890 sec 2024-11-20T17:25:01,779 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 2.2930 sec 2024-11-20T17:25:02,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:25:02,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:25:02,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:25:02,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:02,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:25:02,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:02,430 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:25:02,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:02,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207a9895b25dc14793911d6de44243004d_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123502428/Put/seqid=0 2024-11-20T17:25:02,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:02,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123562443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:02,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:02,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123562444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:02,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742016_1192 (size=12304) 2024-11-20T17:25:02,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:02,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123562546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:02,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:02,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123562547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:02,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:02,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123562750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:02,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:02,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123562750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:02,849 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:02,854 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207a9895b25dc14793911d6de44243004d_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207a9895b25dc14793911d6de44243004d_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:02,856 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/fb96f8255d5d447c89deb99e36854af6, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:02,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/fb96f8255d5d447c89deb99e36854af6 is 175, key is test_row_0/A:col10/1732123502428/Put/seqid=0 2024-11-20T17:25:02,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742017_1193 (size=31105) 2024-11-20T17:25:03,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123563054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:03,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:03,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123563054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:03,266 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=239, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/fb96f8255d5d447c89deb99e36854af6 2024-11-20T17:25:03,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/7ded997ee735499b8e08fbf93919a1fb is 50, key is test_row_0/B:col10/1732123502428/Put/seqid=0 2024-11-20T17:25:03,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742018_1194 (size=12151) 2024-11-20T17:25:03,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:03,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123563558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:03,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:03,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123563559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:03,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T17:25:03,592 INFO [Thread-657 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-20T17:25:03,594 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:25:03,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-20T17:25:03,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T17:25:03,596 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:25:03,597 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:25:03,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:25:03,682 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/7ded997ee735499b8e08fbf93919a1fb 2024-11-20T17:25:03,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/69874b7c73d440e3a0dd6f23a8d06f06 is 50, key is test_row_0/C:col10/1732123502428/Put/seqid=0 2024-11-20T17:25:03,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742019_1195 (size=12151) 
2024-11-20T17:25:03,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T17:25:03,749 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:03,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T17:25:03,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:03,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:03,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:03,750 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:03,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:03,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:03,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T17:25:03,902 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:03,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T17:25:03,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:03,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:03,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:03,903 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:03,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:03,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:04,055 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:04,056 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T17:25:04,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:04,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
as already flushing 2024-11-20T17:25:04,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:04,057 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:04,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:04,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:04,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/69874b7c73d440e3a0dd6f23a8d06f06 2024-11-20T17:25:04,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/fb96f8255d5d447c89deb99e36854af6 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/fb96f8255d5d447c89deb99e36854af6 2024-11-20T17:25:04,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/fb96f8255d5d447c89deb99e36854af6, entries=150, sequenceid=239, filesize=30.4 K 2024-11-20T17:25:04,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/7ded997ee735499b8e08fbf93919a1fb as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/7ded997ee735499b8e08fbf93919a1fb 2024-11-20T17:25:04,115 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/7ded997ee735499b8e08fbf93919a1fb, entries=150, sequenceid=239, filesize=11.9 K 2024-11-20T17:25:04,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/69874b7c73d440e3a0dd6f23a8d06f06 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/69874b7c73d440e3a0dd6f23a8d06f06 2024-11-20T17:25:04,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/69874b7c73d440e3a0dd6f23a8d06f06, entries=150, sequenceid=239, filesize=11.9 K 2024-11-20T17:25:04,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 51263f54befea8a9195f5613e0910232 in 1692ms, sequenceid=239, compaction requested=true 2024-11-20T17:25:04,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:04,122 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:04,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:25:04,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:04,123 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:04,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:25:04,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:04,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:25:04,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:04,124 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93759 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:04,124 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/A is initiating minor 
compaction (all files) 2024-11-20T17:25:04,124 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/A in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:04,124 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3f92cac958dc45d3874bb25072ff58e4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b12acf2e3ddc443cbbe0514cc92bc243, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/fb96f8255d5d447c89deb99e36854af6] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=91.6 K 2024-11-20T17:25:04,124 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:04,124 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3f92cac958dc45d3874bb25072ff58e4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b12acf2e3ddc443cbbe0514cc92bc243, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/fb96f8255d5d447c89deb99e36854af6] 2024-11-20T17:25:04,125 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f92cac958dc45d3874bb25072ff58e4, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732123498024 2024-11-20T17:25:04,125 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:04,125 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/B is initiating minor compaction (all files) 2024-11-20T17:25:04,125 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/B in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:04,125 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/7665bb49ce554ef6891aaf587658a8a3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/ae77d136da7e4b86ac125cf748fbdaee, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/7ded997ee735499b8e08fbf93919a1fb] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=36.0 K 2024-11-20T17:25:04,125 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting b12acf2e3ddc443cbbe0514cc92bc243, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732123499153 2024-11-20T17:25:04,126 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 7665bb49ce554ef6891aaf587658a8a3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732123498024 2024-11-20T17:25:04,126 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb96f8255d5d447c89deb99e36854af6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732123500307 2024-11-20T17:25:04,126 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting ae77d136da7e4b86ac125cf748fbdaee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732123499153 2024-11-20T17:25:04,127 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ded997ee735499b8e08fbf93919a1fb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732123500307 2024-11-20T17:25:04,134 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:04,137 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#B#compaction#163 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:04,138 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/773334aad75d462d8140a634acf6282d is 50, key is test_row_0/B:col10/1732123502428/Put/seqid=0 2024-11-20T17:25:04,148 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120f1f8277cf4d942349c4fcb4857a9ef28_51263f54befea8a9195f5613e0910232 store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:04,151 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120f1f8277cf4d942349c4fcb4857a9ef28_51263f54befea8a9195f5613e0910232, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:04,151 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f1f8277cf4d942349c4fcb4857a9ef28_51263f54befea8a9195f5613e0910232 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:04,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742020_1196 (size=12697) 2024-11-20T17:25:04,180 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/773334aad75d462d8140a634acf6282d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/773334aad75d462d8140a634acf6282d 2024-11-20T17:25:04,187 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/B of 51263f54befea8a9195f5613e0910232 into 773334aad75d462d8140a634acf6282d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:04,187 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:04,187 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/B, priority=13, startTime=1732123504123; duration=0sec 2024-11-20T17:25:04,187 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:04,187 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:B 2024-11-20T17:25:04,187 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:04,188 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:04,188 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/C is initiating minor compaction (all files) 2024-11-20T17:25:04,189 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/C in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:04,189 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/1f3378f5a73b4c6983ca2d7a51aa4a7c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b48be762190e4119acaf509be1c7c72c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/69874b7c73d440e3a0dd6f23a8d06f06] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=36.0 K 2024-11-20T17:25:04,189 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f3378f5a73b4c6983ca2d7a51aa4a7c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732123498024 2024-11-20T17:25:04,190 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting b48be762190e4119acaf509be1c7c72c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732123499153 2024-11-20T17:25:04,191 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 69874b7c73d440e3a0dd6f23a8d06f06, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732123500307 2024-11-20T17:25:04,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=53 2024-11-20T17:25:04,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742021_1197 (size=4469) 2024-11-20T17:25:04,205 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#C#compaction#164 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:04,205 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/4e98c51470cf437bb218f629f8a84e13 is 50, key is test_row_0/C:col10/1732123502428/Put/seqid=0 2024-11-20T17:25:04,209 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:04,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-20T17:25:04,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:04,210 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:25:04,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:25:04,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:04,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:25:04,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:04,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:25:04,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:04,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742022_1198 (size=12697) 2024-11-20T17:25:04,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201acf51dbbc7f44e582b60e3ad7608a8c_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123502437/Put/seqid=0 2024-11-20T17:25:04,233 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/4e98c51470cf437bb218f629f8a84e13 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/4e98c51470cf437bb218f629f8a84e13 2024-11-20T17:25:04,244 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/C of 51263f54befea8a9195f5613e0910232 into 4e98c51470cf437bb218f629f8a84e13(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:04,244 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:04,244 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/C, priority=13, startTime=1732123504123; duration=0sec 2024-11-20T17:25:04,244 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:04,244 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:C 2024-11-20T17:25:04,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742023_1199 (size=12304) 2024-11-20T17:25:04,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:25:04,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:04,605 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#A#compaction#162 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:04,606 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/5295988cf7e64fa9aa1d9b19ec92e17f is 175, key is test_row_0/A:col10/1732123502428/Put/seqid=0 2024-11-20T17:25:04,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742024_1200 (size=31651) 2024-11-20T17:25:04,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:04,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123564619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:04,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:04,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123564619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:04,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:04,652 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201acf51dbbc7f44e582b60e3ad7608a8c_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201acf51dbbc7f44e582b60e3ad7608a8c_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:04,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/5f99e7099b644dc9b2e152bcd9715cc9, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:04,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/5f99e7099b644dc9b2e152bcd9715cc9 is 175, key is test_row_0/A:col10/1732123502437/Put/seqid=0 2024-11-20T17:25:04,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742025_1201 (size=31105) 2024-11-20T17:25:04,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T17:25:04,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:04,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123564722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:04,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:04,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123564722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:04,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:04,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123564925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:04,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:04,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123564925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:05,017 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/5295988cf7e64fa9aa1d9b19ec92e17f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5295988cf7e64fa9aa1d9b19ec92e17f 2024-11-20T17:25:05,022 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/A of 51263f54befea8a9195f5613e0910232 into 5295988cf7e64fa9aa1d9b19ec92e17f(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:05,022 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:05,022 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/A, priority=13, startTime=1732123504122; duration=0sec 2024-11-20T17:25:05,023 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:05,023 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:A 2024-11-20T17:25:05,059 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/5f99e7099b644dc9b2e152bcd9715cc9 2024-11-20T17:25:05,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/002b9d0c4c814b07811a49b6f0b318d4 is 50, key is test_row_0/B:col10/1732123502437/Put/seqid=0 2024-11-20T17:25:05,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742026_1202 (size=12151) 2024-11-20T17:25:05,078 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/002b9d0c4c814b07811a49b6f0b318d4 2024-11-20T17:25:05,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/feb4d653d7574243b33b90bd9ad8d559 is 50, key is test_row_0/C:col10/1732123502437/Put/seqid=0 2024-11-20T17:25:05,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742027_1203 (size=12151) 2024-11-20T17:25:05,105 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/feb4d653d7574243b33b90bd9ad8d559 2024-11-20T17:25:05,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/5f99e7099b644dc9b2e152bcd9715cc9 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5f99e7099b644dc9b2e152bcd9715cc9 2024-11-20T17:25:05,116 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5f99e7099b644dc9b2e152bcd9715cc9, entries=150, sequenceid=252, filesize=30.4 K 2024-11-20T17:25:05,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/002b9d0c4c814b07811a49b6f0b318d4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/002b9d0c4c814b07811a49b6f0b318d4 2024-11-20T17:25:05,122 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/002b9d0c4c814b07811a49b6f0b318d4, entries=150, sequenceid=252, filesize=11.9 K 2024-11-20T17:25:05,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/feb4d653d7574243b33b90bd9ad8d559 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/feb4d653d7574243b33b90bd9ad8d559 2024-11-20T17:25:05,128 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/feb4d653d7574243b33b90bd9ad8d559, entries=150, sequenceid=252, filesize=11.9 K 2024-11-20T17:25:05,129 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 51263f54befea8a9195f5613e0910232 in 919ms, sequenceid=252, compaction requested=false 2024-11-20T17:25:05,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:05,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:05,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-20T17:25:05,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-20T17:25:05,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-20T17:25:05,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5340 sec 2024-11-20T17:25:05,134 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.5390 sec 2024-11-20T17:25:05,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:25:05,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:25:05,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:25:05,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:05,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:25:05,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:05,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:25:05,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:05,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:05,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123565240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:05,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:05,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123565241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:05,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200605816524994a37bb4ad5c9dd7215a1_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123504618/Put/seqid=0 2024-11-20T17:25:05,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742028_1204 (size=17534) 2024-11-20T17:25:05,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:05,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123565343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:05,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:05,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123565343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:05,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:05,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:05,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123565546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:05,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123565545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:05,654 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:05,659 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200605816524994a37bb4ad5c9dd7215a1_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200605816524994a37bb4ad5c9dd7215a1_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:05,660 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/32a0cc4eb7a54de3ad2670f51042678c, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:05,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/32a0cc4eb7a54de3ad2670f51042678c is 175, key is test_row_0/A:col10/1732123504618/Put/seqid=0 2024-11-20T17:25:05,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742029_1205 (size=48639) 2024-11-20T17:25:05,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T17:25:05,703 INFO [Thread-657 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-20T17:25:05,704 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush 
TestAcidGuarantees 2024-11-20T17:25:05,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-20T17:25:05,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:25:05,706 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:25:05,706 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:25:05,706 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:25:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:25:05,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:05,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123565849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:05,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:05,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123565850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:05,858 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:05,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T17:25:05,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:05,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:05,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:05,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:05,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:05,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:25:06,011 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:06,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T17:25:06,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:06,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,070 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=280, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/32a0cc4eb7a54de3ad2670f51042678c 2024-11-20T17:25:06,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/094299b218ca4445960cfaf4a8559de7 is 50, key is test_row_0/B:col10/1732123504618/Put/seqid=0 2024-11-20T17:25:06,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742030_1206 (size=12301) 2024-11-20T17:25:06,164 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:06,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T17:25:06,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
as already flushing 2024-11-20T17:25:06,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,165 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:25:06,317 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:06,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T17:25:06,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:06,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:06,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123566355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:06,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123566356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:06,471 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:06,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T17:25:06,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:06,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:06,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:06,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:06,486 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/094299b218ca4445960cfaf4a8559de7 2024-11-20T17:25:06,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/492396feaed94a7d869fba5450aa5e2c is 50, key is test_row_0/C:col10/1732123504618/Put/seqid=0 2024-11-20T17:25:06,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742031_1207 (size=12301) 2024-11-20T17:25:06,624 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:06,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T17:25:06,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:06,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,625 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:06,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,777 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:06,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T17:25:06,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:06,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:06,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:06,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:25:06,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/492396feaed94a7d869fba5450aa5e2c 2024-11-20T17:25:06,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/32a0cc4eb7a54de3ad2670f51042678c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/32a0cc4eb7a54de3ad2670f51042678c 2024-11-20T17:25:06,909 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/32a0cc4eb7a54de3ad2670f51042678c, entries=250, sequenceid=280, filesize=47.5 K 2024-11-20T17:25:06,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/094299b218ca4445960cfaf4a8559de7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/094299b218ca4445960cfaf4a8559de7 2024-11-20T17:25:06,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/094299b218ca4445960cfaf4a8559de7, entries=150, sequenceid=280, filesize=12.0 K 2024-11-20T17:25:06,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/492396feaed94a7d869fba5450aa5e2c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/492396feaed94a7d869fba5450aa5e2c 2024-11-20T17:25:06,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/492396feaed94a7d869fba5450aa5e2c, entries=150, sequenceid=280, filesize=12.0 K 2024-11-20T17:25:06,921 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 51263f54befea8a9195f5613e0910232 in 1690ms, sequenceid=280, compaction requested=true 2024-11-20T17:25:06,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:06,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:A, priority=-2147483648, current under compaction store size 
is 1 2024-11-20T17:25:06,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:06,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:25:06,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:06,922 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:06,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:25:06,922 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:06,922 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:06,923 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:06,923 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111395 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:06,923 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/A is initiating minor compaction (all files) 2024-11-20T17:25:06,923 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/B is initiating minor compaction (all files) 2024-11-20T17:25:06,923 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/A in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,923 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/B in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:06,923 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/773334aad75d462d8140a634acf6282d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/002b9d0c4c814b07811a49b6f0b318d4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/094299b218ca4445960cfaf4a8559de7] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=36.3 K 2024-11-20T17:25:06,923 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5295988cf7e64fa9aa1d9b19ec92e17f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5f99e7099b644dc9b2e152bcd9715cc9, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/32a0cc4eb7a54de3ad2670f51042678c] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=108.8 K 2024-11-20T17:25:06,923 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:06,924 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5295988cf7e64fa9aa1d9b19ec92e17f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5f99e7099b644dc9b2e152bcd9715cc9, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/32a0cc4eb7a54de3ad2670f51042678c] 2024-11-20T17:25:06,924 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 773334aad75d462d8140a634acf6282d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732123500307 2024-11-20T17:25:06,924 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5295988cf7e64fa9aa1d9b19ec92e17f, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732123500307 2024-11-20T17:25:06,924 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 002b9d0c4c814b07811a49b6f0b318d4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732123502437 2024-11-20T17:25:06,925 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f99e7099b644dc9b2e152bcd9715cc9, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732123502437 2024-11-20T17:25:06,925 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32a0cc4eb7a54de3ad2670f51042678c, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732123504590 2024-11-20T17:25:06,925 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 094299b218ca4445960cfaf4a8559de7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732123504618 2024-11-20T17:25:06,930 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:06,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-20T17:25:06,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:06,931 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T17:25:06,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:25:06,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:06,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:25:06,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:06,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:25:06,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:06,939 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#B#compaction#171 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:06,940 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/94521670fd674f288f7eb1c8498608b3 is 50, key is test_row_0/B:col10/1732123504618/Put/seqid=0 2024-11-20T17:25:06,948 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:06,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208308505b6e004e258d9947f08e68f450_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123505232/Put/seqid=0 2024-11-20T17:25:06,957 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112041e34858ef594615b37b529e4b997150_51263f54befea8a9195f5613e0910232 store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:06,958 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112041e34858ef594615b37b529e4b997150_51263f54befea8a9195f5613e0910232, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:06,958 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112041e34858ef594615b37b529e4b997150_51263f54befea8a9195f5613e0910232 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:06,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742032_1208 (size=12949) 2024-11-20T17:25:06,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742033_1209 (size=12454) 2024-11-20T17:25:06,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742034_1210 (size=4469) 2024-11-20T17:25:06,973 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#A#compaction#172 average throughput is 0.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:06,974 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/b342eebbc0a14891a2177d4840971479 is 175, key is test_row_0/A:col10/1732123504618/Put/seqid=0 2024-11-20T17:25:06,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742035_1211 (size=31903) 2024-11-20T17:25:06,983 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/b342eebbc0a14891a2177d4840971479 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b342eebbc0a14891a2177d4840971479 2024-11-20T17:25:06,988 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/A of 51263f54befea8a9195f5613e0910232 into b342eebbc0a14891a2177d4840971479(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:06,988 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:06,988 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/A, priority=13, startTime=1732123506922; duration=0sec 2024-11-20T17:25:06,988 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:06,989 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:A 2024-11-20T17:25:06,989 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:06,990 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:06,990 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/C is initiating minor compaction (all files) 2024-11-20T17:25:06,990 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/C in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:06,990 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/4e98c51470cf437bb218f629f8a84e13, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/feb4d653d7574243b33b90bd9ad8d559, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/492396feaed94a7d869fba5450aa5e2c] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=36.3 K 2024-11-20T17:25:06,991 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e98c51470cf437bb218f629f8a84e13, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732123500307 2024-11-20T17:25:06,991 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting feb4d653d7574243b33b90bd9ad8d559, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732123502437 2024-11-20T17:25:06,991 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 492396feaed94a7d869fba5450aa5e2c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732123504618 2024-11-20T17:25:06,999 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#C#compaction#174 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:07,000 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/b01f3a12544b4d1bab0995bc6a80ff93 is 50, key is test_row_0/C:col10/1732123504618/Put/seqid=0 2024-11-20T17:25:07,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742036_1212 (size=12949) 2024-11-20T17:25:07,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:25:07,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
as already flushing 2024-11-20T17:25:07,366 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/94521670fd674f288f7eb1c8498608b3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/94521670fd674f288f7eb1c8498608b3 2024-11-20T17:25:07,372 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/B of 51263f54befea8a9195f5613e0910232 into 94521670fd674f288f7eb1c8498608b3(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:07,372 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:07,372 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/B, priority=13, startTime=1732123506922; duration=0sec 2024-11-20T17:25:07,372 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:07,372 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:B 2024-11-20T17:25:07,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:07,380 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208308505b6e004e258d9947f08e68f450_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208308505b6e004e258d9947f08e68f450_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:07,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/74b95f0f6cbf4523b0a84b96ec7005bd, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:07,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/74b95f0f6cbf4523b0a84b96ec7005bd is 175, key is 
test_row_0/A:col10/1732123505232/Put/seqid=0 2024-11-20T17:25:07,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742037_1213 (size=31255) 2024-11-20T17:25:07,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:07,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123567395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:07,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:07,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123567396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:07,411 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/b01f3a12544b4d1bab0995bc6a80ff93 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b01f3a12544b4d1bab0995bc6a80ff93 2024-11-20T17:25:07,416 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/C of 51263f54befea8a9195f5613e0910232 into b01f3a12544b4d1bab0995bc6a80ff93(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:07,416 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:07,416 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/C, priority=13, startTime=1732123506922; duration=0sec 2024-11-20T17:25:07,417 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:07,417 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:C 2024-11-20T17:25:07,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:07,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123567499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:07,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:07,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123567499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:07,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:07,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123567701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:07,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:07,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123567702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:07,793 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/74b95f0f6cbf4523b0a84b96ec7005bd 2024-11-20T17:25:07,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/9466a2aed66b4ff88d7df5fc33bf3554 is 50, key is test_row_0/B:col10/1732123505232/Put/seqid=0 2024-11-20T17:25:07,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742038_1214 (size=12301) 2024-11-20T17:25:07,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:25:08,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:08,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123568004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:08,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:08,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123568005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:08,206 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/9466a2aed66b4ff88d7df5fc33bf3554 2024-11-20T17:25:08,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/c3da8658a69a4322bd9efa20ce4fa796 is 50, key is test_row_0/C:col10/1732123505232/Put/seqid=0 2024-11-20T17:25:08,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742039_1215 (size=12301) 2024-11-20T17:25:08,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:08,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123568507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:08,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:08,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123568510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:08,621 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/c3da8658a69a4322bd9efa20ce4fa796 2024-11-20T17:25:08,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/74b95f0f6cbf4523b0a84b96ec7005bd as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/74b95f0f6cbf4523b0a84b96ec7005bd 2024-11-20T17:25:08,632 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/74b95f0f6cbf4523b0a84b96ec7005bd, entries=150, sequenceid=290, filesize=30.5 K 2024-11-20T17:25:08,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/9466a2aed66b4ff88d7df5fc33bf3554 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/9466a2aed66b4ff88d7df5fc33bf3554 2024-11-20T17:25:08,638 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/9466a2aed66b4ff88d7df5fc33bf3554, entries=150, sequenceid=290, filesize=12.0 K 2024-11-20T17:25:08,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/c3da8658a69a4322bd9efa20ce4fa796 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/c3da8658a69a4322bd9efa20ce4fa796 2024-11-20T17:25:08,645 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/c3da8658a69a4322bd9efa20ce4fa796, entries=150, sequenceid=290, filesize=12.0 K 2024-11-20T17:25:08,645 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 51263f54befea8a9195f5613e0910232 in 1714ms, sequenceid=290, compaction requested=false 2024-11-20T17:25:08,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:08,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:08,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-20T17:25:08,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-20T17:25:08,648 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-20T17:25:08,649 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9410 sec 2024-11-20T17:25:08,651 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 2.9450 sec 2024-11-20T17:25:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:25:09,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T17:25:09,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:25:09,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:09,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:25:09,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:09,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:25:09,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:09,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f9c40b1d91f540c48dedec27ffdd141a_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123509518/Put/seqid=0 2024-11-20T17:25:09,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:09,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123569528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:09,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:09,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123569529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:09,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742040_1216 (size=12454) 2024-11-20T17:25:09,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123569631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:09,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123569631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:09,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-20T17:25:09,810 INFO [Thread-657 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-20T17:25:09,812 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:25:09,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-20T17:25:09,814 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:25:09,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:25:09,814 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:25:09,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:25:09,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:09,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:09,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123569833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:09,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123569833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:09,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:25:09,933 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:09,939 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f9c40b1d91f540c48dedec27ffdd141a_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f9c40b1d91f540c48dedec27ffdd141a_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:09,944 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/41a26916ac9a4ad1b4e11e302ce62f7d, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:09,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/41a26916ac9a4ad1b4e11e302ce62f7d is 175, key is test_row_0/A:col10/1732123509518/Put/seqid=0 2024-11-20T17:25:09,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742041_1217 (size=31255) 2024-11-20T17:25:09,966 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:09,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T17:25:09,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:09,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:09,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:09,967 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:09,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:09,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:25:10,119 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:10,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T17:25:10,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:10,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:10,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:10,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:10,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:10,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123570137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:10,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:10,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123570139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:10,272 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:10,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T17:25:10,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:10,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:10,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:10,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:10,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
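The exchange above — the master dispatching FlushRegionCallable for pid=58, the region server answering "NOT flushing ... as already flushing", and the master logging "Remote procedure failed" before dispatching again — is an admin-requested table flush racing with the flush that MemStoreFlusher.0 already has in progress. The following is a minimal sketch, not taken from the test itself, of how such a flush is typically requested through the client API; the configuration and connection handling are assumptions for illustration, and only the table name comes from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        // Assumes an hbase-site.xml on the classpath pointing at the cluster under test.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Admin.flush submits the flush to the master, where it runs as a
          // FlushTableProcedure (pid=57 above) with one FlushRegionProcedure per region;
          // the client then polls "Checking to see if procedure is done" until it completes.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

While a region is already flushing, the region server rejects the remote flush call with the IOException "Unable to complete flush", which appears to be why the same pid=58 dispatch repeats in the log until the in-progress flush finishes.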
2024-11-20T17:25:10,354 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=320, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/41a26916ac9a4ad1b4e11e302ce62f7d 2024-11-20T17:25:10,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/07914cb487874fbc8133fe0ab9c5ac82 is 50, key is test_row_0/B:col10/1732123509518/Put/seqid=0 2024-11-20T17:25:10,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742042_1218 (size=12301) 2024-11-20T17:25:10,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:25:10,425 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:10,426 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T17:25:10,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:10,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:10,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:10,426 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:10,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,578 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:10,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T17:25:10,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:10,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:10,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:10,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:10,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40248 deadline: 1732123570641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:10,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:10,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40260 deadline: 1732123570642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:10,731 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:10,731 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T17:25:10,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:10,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:10,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:10,732 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
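The recurring RegionTooBusyException "Over memstore limit=512.0 K" warnings come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking threshold. Below is a minimal sketch of the two configuration properties that determine that threshold; the values are illustrative assumptions chosen to be consistent with the 512.0 K limit seen here, not the test's actual settings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush size; 128 KB is an illustrative test value (the default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writes are refused with RegionTooBusyException once the memstore reaches
        // flush size * block multiplier: 128 KB * 4 = 512 KB in this sketch.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore limit per region: " + blockingLimit + " bytes");
      }
    }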
2024-11-20T17:25:10,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/07914cb487874fbc8133fe0ab9c5ac82 2024-11-20T17:25:10,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/f89a10c2701743348f727b8156191605 is 50, key is test_row_0/C:col10/1732123509518/Put/seqid=0 2024-11-20T17:25:10,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742043_1219 (size=12301) 2024-11-20T17:25:10,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:10,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40238 deadline: 1732123570810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:10,812 DEBUG [Thread-647 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., hostname=d514dc944523,44015,1732123455293, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:10,884 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:10,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T17:25:10,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:10,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:10,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:10,885 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:10,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:10,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:10,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40244 deadline: 1732123570905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:10,908 DEBUG [Thread-653 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18255 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., hostname=d514dc944523,44015,1732123455293, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:10,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:10,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:40266 deadline: 1732123570910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:10,912 DEBUG [Thread-649 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18254 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., hostname=d514dc944523,44015,1732123455293, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:10,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:25:11,036 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:11,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T17:25:11,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:11,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:11,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:11,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:11,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:11,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:11,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/f89a10c2701743348f727b8156191605 2024-11-20T17:25:11,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/41a26916ac9a4ad1b4e11e302ce62f7d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/41a26916ac9a4ad1b4e11e302ce62f7d 2024-11-20T17:25:11,197 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:11,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T17:25:11,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/41a26916ac9a4ad1b4e11e302ce62f7d, entries=150, sequenceid=320, filesize=30.5 K 2024-11-20T17:25:11,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:11,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. as already flushing 2024-11-20T17:25:11,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:11,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:11,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:11,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:11,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/07914cb487874fbc8133fe0ab9c5ac82 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/07914cb487874fbc8133fe0ab9c5ac82 2024-11-20T17:25:11,208 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/07914cb487874fbc8133fe0ab9c5ac82, entries=150, sequenceid=320, filesize=12.0 K 2024-11-20T17:25:11,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/f89a10c2701743348f727b8156191605 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/f89a10c2701743348f727b8156191605 2024-11-20T17:25:11,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/f89a10c2701743348f727b8156191605, entries=150, sequenceid=320, filesize=12.0 K 2024-11-20T17:25:11,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 51263f54befea8a9195f5613e0910232 in 1699ms, sequenceid=320, compaction requested=true 2024-11-20T17:25:11,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:11,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:25:11,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:11,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:25:11,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:25:11,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 51263f54befea8a9195f5613e0910232:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:25:11,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 
2024-11-20T17:25:11,219 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:11,220 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:11,221 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94413 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:11,221 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/A is initiating minor compaction (all files) 2024-11-20T17:25:11,221 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:11,221 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/A in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:11,221 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/C is initiating minor compaction (all files) 2024-11-20T17:25:11,221 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/C in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:11,221 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b342eebbc0a14891a2177d4840971479, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/74b95f0f6cbf4523b0a84b96ec7005bd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/41a26916ac9a4ad1b4e11e302ce62f7d] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=92.2 K 2024-11-20T17:25:11,221 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:11,221 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b01f3a12544b4d1bab0995bc6a80ff93, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/c3da8658a69a4322bd9efa20ce4fa796, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/f89a10c2701743348f727b8156191605] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=36.7 K 2024-11-20T17:25:11,221 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b342eebbc0a14891a2177d4840971479, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/74b95f0f6cbf4523b0a84b96ec7005bd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/41a26916ac9a4ad1b4e11e302ce62f7d] 2024-11-20T17:25:11,222 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting b342eebbc0a14891a2177d4840971479, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732123504618 2024-11-20T17:25:11,222 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting b01f3a12544b4d1bab0995bc6a80ff93, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732123504618 2024-11-20T17:25:11,222 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting c3da8658a69a4322bd9efa20ce4fa796, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732123505232 2024-11-20T17:25:11,222 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74b95f0f6cbf4523b0a84b96ec7005bd, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732123505232 2024-11-20T17:25:11,223 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting f89a10c2701743348f727b8156191605, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732123507365 2024-11-20T17:25:11,223 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41a26916ac9a4ad1b4e11e302ce62f7d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732123507365 2024-11-20T17:25:11,231 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#C#compaction#180 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:11,232 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/2d71a341e73d45829e6a1c43c13ee37a is 50, key is test_row_0/C:col10/1732123509518/Put/seqid=0 2024-11-20T17:25:11,232 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:11,235 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411204b3a6127db61467e940b0b3220ba7146_51263f54befea8a9195f5613e0910232 store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:11,236 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411204b3a6127db61467e940b0b3220ba7146_51263f54befea8a9195f5613e0910232, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:11,236 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204b3a6127db61467e940b0b3220ba7146_51263f54befea8a9195f5613e0910232 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:11,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742044_1220 (size=13051) 2024-11-20T17:25:11,244 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/2d71a341e73d45829e6a1c43c13ee37a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/2d71a341e73d45829e6a1c43c13ee37a 2024-11-20T17:25:11,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742045_1221 (size=4469) 2024-11-20T17:25:11,246 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#A#compaction#181 average throughput is 1.88 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:11,247 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/8d9b4d6fcaae4b878da2b98007bd9a9d is 175, key is test_row_0/A:col10/1732123509518/Put/seqid=0 2024-11-20T17:25:11,249 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/C of 51263f54befea8a9195f5613e0910232 into 2d71a341e73d45829e6a1c43c13ee37a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:11,249 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:11,249 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/C, priority=13, startTime=1732123511219; duration=0sec 2024-11-20T17:25:11,250 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:11,250 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:C 2024-11-20T17:25:11,250 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:11,251 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:11,251 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 51263f54befea8a9195f5613e0910232/B is initiating minor compaction (all files) 2024-11-20T17:25:11,251 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 51263f54befea8a9195f5613e0910232/B in TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:11,251 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/94521670fd674f288f7eb1c8498608b3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/9466a2aed66b4ff88d7df5fc33bf3554, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/07914cb487874fbc8133fe0ab9c5ac82] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp, totalSize=36.7 K 2024-11-20T17:25:11,252 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 94521670fd674f288f7eb1c8498608b3, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732123504618 2024-11-20T17:25:11,252 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 9466a2aed66b4ff88d7df5fc33bf3554, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732123505232 2024-11-20T17:25:11,253 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 07914cb487874fbc8133fe0ab9c5ac82, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732123507365 2024-11-20T17:25:11,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742046_1222 (size=32005) 2024-11-20T17:25:11,261 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/8d9b4d6fcaae4b878da2b98007bd9a9d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/8d9b4d6fcaae4b878da2b98007bd9a9d 2024-11-20T17:25:11,262 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 51263f54befea8a9195f5613e0910232#B#compaction#182 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:11,262 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/3b50606b9cb64a5883c262c10455ea27 is 50, key is test_row_0/B:col10/1732123509518/Put/seqid=0 2024-11-20T17:25:11,264 DEBUG [Thread-658 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x29dad7a8 to 127.0.0.1:56028 2024-11-20T17:25:11,264 DEBUG [Thread-658 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:11,264 DEBUG [Thread-662 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x250a1de4 to 127.0.0.1:56028 2024-11-20T17:25:11,265 DEBUG [Thread-662 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:11,265 DEBUG [Thread-660 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62c6fdab to 127.0.0.1:56028 2024-11-20T17:25:11,265 DEBUG [Thread-660 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:11,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742047_1223 (size=13051) 2024-11-20T17:25:11,268 DEBUG [Thread-664 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49456175 to 127.0.0.1:56028 2024-11-20T17:25:11,268 DEBUG [Thread-664 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:11,268 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/A of 51263f54befea8a9195f5613e0910232 into 8d9b4d6fcaae4b878da2b98007bd9a9d(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:11,269 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:11,269 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/A, priority=13, startTime=1732123511219; duration=0sec 2024-11-20T17:25:11,269 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:11,269 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:A 2024-11-20T17:25:11,273 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/3b50606b9cb64a5883c262c10455ea27 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/3b50606b9cb64a5883c262c10455ea27 2024-11-20T17:25:11,277 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 51263f54befea8a9195f5613e0910232/B of 51263f54befea8a9195f5613e0910232 into 3b50606b9cb64a5883c262c10455ea27(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:11,277 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:11,277 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232., storeName=51263f54befea8a9195f5613e0910232/B, priority=13, startTime=1732123511219; duration=0sec 2024-11-20T17:25:11,278 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:11,278 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 51263f54befea8a9195f5613e0910232:B 2024-11-20T17:25:11,351 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:11,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-20T17:25:11,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:11,351 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T17:25:11,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:25:11,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:11,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:25:11,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:11,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:25:11,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:11,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b85a3796cbf94225892ae1cddbd8f257_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123509523/Put/seqid=0 2024-11-20T17:25:11,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742048_1224 (size=12454) 2024-11-20T17:25:11,646 DEBUG [Thread-651 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x28c904d8 to 127.0.0.1:56028 2024-11-20T17:25:11,646 DEBUG [Thread-651 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:11,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 51263f54befea8a9195f5613e0910232 2024-11-20T17:25:11,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
as already flushing 2024-11-20T17:25:11,652 DEBUG [Thread-655 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a63fed4 to 127.0.0.1:56028 2024-11-20T17:25:11,652 DEBUG [Thread-655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:11,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:11,766 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b85a3796cbf94225892ae1cddbd8f257_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b85a3796cbf94225892ae1cddbd8f257_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:11,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/463a73bf1fa2492b8561f531f89cdcb2, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:11,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/463a73bf1fa2492b8561f531f89cdcb2 is 175, key is test_row_0/A:col10/1732123509523/Put/seqid=0 2024-11-20T17:25:11,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742049_1225 (size=31255) 2024-11-20T17:25:11,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:25:12,172 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/463a73bf1fa2492b8561f531f89cdcb2 2024-11-20T17:25:12,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/92d49b007df74f2d9935c968f81a93bc is 50, key is test_row_0/B:col10/1732123509523/Put/seqid=0 2024-11-20T17:25:12,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742050_1226 (size=12301) 2024-11-20T17:25:12,584 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/92d49b007df74f2d9935c968f81a93bc 2024-11-20T17:25:12,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/dbf35572d1964d328f98b74683a4c88a is 50, key is test_row_0/C:col10/1732123509523/Put/seqid=0 2024-11-20T17:25:12,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742051_1227 (size=12301) 2024-11-20T17:25:12,995 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/dbf35572d1964d328f98b74683a4c88a 2024-11-20T17:25:13,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/463a73bf1fa2492b8561f531f89cdcb2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/463a73bf1fa2492b8561f531f89cdcb2 2024-11-20T17:25:13,004 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/463a73bf1fa2492b8561f531f89cdcb2, entries=150, sequenceid=332, filesize=30.5 K 2024-11-20T17:25:13,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/92d49b007df74f2d9935c968f81a93bc as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/92d49b007df74f2d9935c968f81a93bc 2024-11-20T17:25:13,009 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/92d49b007df74f2d9935c968f81a93bc, entries=150, sequenceid=332, filesize=12.0 K 2024-11-20T17:25:13,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/dbf35572d1964d328f98b74683a4c88a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/dbf35572d1964d328f98b74683a4c88a 2024-11-20T17:25:13,013 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/dbf35572d1964d328f98b74683a4c88a, entries=150, sequenceid=332, filesize=12.0 K 2024-11-20T17:25:13,014 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=13.42 KB/13740 for 51263f54befea8a9195f5613e0910232 in 1663ms, sequenceid=332, compaction requested=false 2024-11-20T17:25:13,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:13,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:13,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-20T17:25:13,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-20T17:25:13,016 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-20T17:25:13,016 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2000 sec 2024-11-20T17:25:13,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 3.2040 sec 2024-11-20T17:25:13,698 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-20T17:25:13,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T17:25:13,919 INFO [Thread-657 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-20T17:25:19,590 DEBUG [master/d514dc944523:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region e60665fdde91447ed275607cc98db134 changed from -1.0 to 0.0, refreshing cache 2024-11-20T17:25:20,887 DEBUG [Thread-647 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a0aa7d7 to 127.0.0.1:56028 2024-11-20T17:25:20,887 DEBUG [Thread-647 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:20,974 DEBUG [Thread-653 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d6eb994 to 127.0.0.1:56028 2024-11-20T17:25:20,974 DEBUG [Thread-653 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:20,976 DEBUG [Thread-649 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0231f064 to 127.0.0.1:56028 2024-11-20T17:25:20,976 DEBUG [Thread-649 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:20,976 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T17:25:20,976 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 13 2024-11-20T17:25:20,976 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 13 2024-11-20T17:25:20,976 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 114 2024-11-20T17:25:20,976 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 13 2024-11-20T17:25:20,976 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 108 2024-11-20T17:25:20,976 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T17:25:20,977 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7874 2024-11-20T17:25:20,977 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7645 2024-11-20T17:25:20,977 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T17:25:20,977 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3302 2024-11-20T17:25:20,977 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9906 rows 2024-11-20T17:25:20,977 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3294 2024-11-20T17:25:20,977 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9882 rows 2024-11-20T17:25:20,977 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:25:20,977 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7f3c14c0 to 127.0.0.1:56028 2024-11-20T17:25:20,977 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:20,979 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T17:25:20,980 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T17:25:20,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:20,983 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T17:25:20,984 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123520983"}]},"ts":"1732123520983"} 2024-11-20T17:25:20,985 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T17:25:20,988 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T17:25:20,989 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:25:20,990 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=51263f54befea8a9195f5613e0910232, UNASSIGN}] 2024-11-20T17:25:20,991 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=51263f54befea8a9195f5613e0910232, UNASSIGN 2024-11-20T17:25:20,991 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=51263f54befea8a9195f5613e0910232, regionState=CLOSING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:25:20,992 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:25:20,992 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; CloseRegionProcedure 51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:25:21,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T17:25:21,143 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:21,144 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(124): Close 51263f54befea8a9195f5613e0910232 2024-11-20T17:25:21,144 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:25:21,144 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1681): Closing 51263f54befea8a9195f5613e0910232, disabling compactions & flushes 2024-11-20T17:25:21,144 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:21,144 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
2024-11-20T17:25:21,144 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. after waiting 0 ms 2024-11-20T17:25:21,144 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 2024-11-20T17:25:21,144 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(2837): Flushing 51263f54befea8a9195f5613e0910232 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T17:25:21,145 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=A 2024-11-20T17:25:21,145 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:21,145 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=B 2024-11-20T17:25:21,145 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:21,145 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 51263f54befea8a9195f5613e0910232, store=C 2024-11-20T17:25:21,145 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:21,152 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f4494fa88e844b1cbcc971f9daa17e10_51263f54befea8a9195f5613e0910232 is 50, key is test_row_0/A:col10/1732123520975/Put/seqid=0 2024-11-20T17:25:21,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742052_1228 (size=12454) 2024-11-20T17:25:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T17:25:21,556 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:21,561 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f4494fa88e844b1cbcc971f9daa17e10_51263f54befea8a9195f5613e0910232 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f4494fa88e844b1cbcc971f9daa17e10_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:21,562 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/26959d57fe8e4a338f580558aae574c4, store: [table=TestAcidGuarantees family=A region=51263f54befea8a9195f5613e0910232] 2024-11-20T17:25:21,562 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/26959d57fe8e4a338f580558aae574c4 is 175, key is test_row_0/A:col10/1732123520975/Put/seqid=0 2024-11-20T17:25:21,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742053_1229 (size=31255) 2024-11-20T17:25:21,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T17:25:21,967 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=340, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/26959d57fe8e4a338f580558aae574c4 2024-11-20T17:25:21,973 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/507415c105e34c26ad89bdc12f196e8f is 50, key is test_row_0/B:col10/1732123520975/Put/seqid=0 2024-11-20T17:25:21,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742054_1230 (size=12301) 2024-11-20T17:25:22,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T17:25:22,377 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/507415c105e34c26ad89bdc12f196e8f 2024-11-20T17:25:22,385 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/3522df28c1b843d79615a1d3db8fa078 is 50, key is test_row_0/C:col10/1732123520975/Put/seqid=0 2024-11-20T17:25:22,388 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742055_1231 (size=12301) 2024-11-20T17:25:22,789 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/3522df28c1b843d79615a1d3db8fa078 2024-11-20T17:25:22,793 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/A/26959d57fe8e4a338f580558aae574c4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/26959d57fe8e4a338f580558aae574c4 2024-11-20T17:25:22,797 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/26959d57fe8e4a338f580558aae574c4, entries=150, sequenceid=340, filesize=30.5 K 2024-11-20T17:25:22,798 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/B/507415c105e34c26ad89bdc12f196e8f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/507415c105e34c26ad89bdc12f196e8f 2024-11-20T17:25:22,801 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/507415c105e34c26ad89bdc12f196e8f, entries=150, sequenceid=340, filesize=12.0 K 2024-11-20T17:25:22,802 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/.tmp/C/3522df28c1b843d79615a1d3db8fa078 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/3522df28c1b843d79615a1d3db8fa078 2024-11-20T17:25:22,806 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/3522df28c1b843d79615a1d3db8fa078, entries=150, sequenceid=340, filesize=12.0 K 2024-11-20T17:25:22,807 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, 
currentSize=0 B/0 for 51263f54befea8a9195f5613e0910232 in 1662ms, sequenceid=340, compaction requested=true 2024-11-20T17:25:22,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/871e71c616584f02bafdeaa5c614c9c7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/c100d5bab1114bdd8166489f6bf4dc00, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/143765aacfce48f894405a37ac5d7183, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/cc242a97ad5f4547b323947efd24ce73, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3928c71aa0bd44f581b5b51f26f28d6e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/d8dab2181096478389242005107643c0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/0f4f47ee1f9f409296a9f8d80a6eb09c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/45c5294dd0464ec69bb5fa62b63ad173, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a46b18bc2b3d458abaafe01fdc5d826c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/9ccff51f5a644a038a447c3566d5d1ec, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/49c6733d93d842279e1becdb51661282, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a9bb9212e59746d1b72b8dffef7b28cf, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3f92cac958dc45d3874bb25072ff58e4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/71cb6fdefaca477a8d19965000dab144, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b12acf2e3ddc443cbbe0514cc92bc243, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5295988cf7e64fa9aa1d9b19ec92e17f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/fb96f8255d5d447c89deb99e36854af6, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5f99e7099b644dc9b2e152bcd9715cc9, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/32a0cc4eb7a54de3ad2670f51042678c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b342eebbc0a14891a2177d4840971479, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/74b95f0f6cbf4523b0a84b96ec7005bd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/41a26916ac9a4ad1b4e11e302ce62f7d] to archive 2024-11-20T17:25:22,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:25:22,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/871e71c616584f02bafdeaa5c614c9c7 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/871e71c616584f02bafdeaa5c614c9c7 2024-11-20T17:25:22,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/c100d5bab1114bdd8166489f6bf4dc00 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/c100d5bab1114bdd8166489f6bf4dc00 2024-11-20T17:25:22,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/143765aacfce48f894405a37ac5d7183 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/143765aacfce48f894405a37ac5d7183 2024-11-20T17:25:22,813 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/cc242a97ad5f4547b323947efd24ce73 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/cc242a97ad5f4547b323947efd24ce73 2024-11-20T17:25:22,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3928c71aa0bd44f581b5b51f26f28d6e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3928c71aa0bd44f581b5b51f26f28d6e 2024-11-20T17:25:22,815 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/d8dab2181096478389242005107643c0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/d8dab2181096478389242005107643c0 2024-11-20T17:25:22,816 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/0f4f47ee1f9f409296a9f8d80a6eb09c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/0f4f47ee1f9f409296a9f8d80a6eb09c 2024-11-20T17:25:22,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/45c5294dd0464ec69bb5fa62b63ad173 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/45c5294dd0464ec69bb5fa62b63ad173 2024-11-20T17:25:22,819 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a46b18bc2b3d458abaafe01fdc5d826c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a46b18bc2b3d458abaafe01fdc5d826c 2024-11-20T17:25:22,820 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/9ccff51f5a644a038a447c3566d5d1ec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/9ccff51f5a644a038a447c3566d5d1ec 2024-11-20T17:25:22,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/49c6733d93d842279e1becdb51661282 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/49c6733d93d842279e1becdb51661282 2024-11-20T17:25:22,822 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a9bb9212e59746d1b72b8dffef7b28cf to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/a9bb9212e59746d1b72b8dffef7b28cf 2024-11-20T17:25:22,823 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3f92cac958dc45d3874bb25072ff58e4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/3f92cac958dc45d3874bb25072ff58e4 2024-11-20T17:25:22,824 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/71cb6fdefaca477a8d19965000dab144 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/71cb6fdefaca477a8d19965000dab144 2024-11-20T17:25:22,825 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b12acf2e3ddc443cbbe0514cc92bc243 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b12acf2e3ddc443cbbe0514cc92bc243 2024-11-20T17:25:22,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5295988cf7e64fa9aa1d9b19ec92e17f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5295988cf7e64fa9aa1d9b19ec92e17f 2024-11-20T17:25:22,827 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/fb96f8255d5d447c89deb99e36854af6 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/fb96f8255d5d447c89deb99e36854af6 2024-11-20T17:25:22,828 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5f99e7099b644dc9b2e152bcd9715cc9 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/5f99e7099b644dc9b2e152bcd9715cc9 2024-11-20T17:25:22,830 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/32a0cc4eb7a54de3ad2670f51042678c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/32a0cc4eb7a54de3ad2670f51042678c 2024-11-20T17:25:22,831 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b342eebbc0a14891a2177d4840971479 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/b342eebbc0a14891a2177d4840971479 2024-11-20T17:25:22,832 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/74b95f0f6cbf4523b0a84b96ec7005bd to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/74b95f0f6cbf4523b0a84b96ec7005bd 2024-11-20T17:25:22,833 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/41a26916ac9a4ad1b4e11e302ce62f7d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/41a26916ac9a4ad1b4e11e302ce62f7d 2024-11-20T17:25:22,834 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/4c27577e0f33486fa636eddead4f1ac0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/4fff0ab6d10b41b4833f44e07cff126d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/a4dcfe75a97044fb8eb43c355f12f326, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/9fd868ddd4254575b1d966632b60bf0a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/628215422a81439988d878ff74403f0a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/fcdf7cc649ab4259b43ab11034a39abd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/d56726b756e4437997a5a7b60a1408fa, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/761b0500b2934750939ce6d70980bfe2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/22cac85029c549a1bca145b8360f5768, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/6570ee5c8bcc401397330aaab8829af2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/225e51ad6fe041b5be7d423d6b996144, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/8a09f1bcd8204dc69e618870a30d1bd6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/7665bb49ce554ef6891aaf587658a8a3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/3e259d8286f1458fa5f92390a347f7e8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/ae77d136da7e4b86ac125cf748fbdaee, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/773334aad75d462d8140a634acf6282d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/7ded997ee735499b8e08fbf93919a1fb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/002b9d0c4c814b07811a49b6f0b318d4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/94521670fd674f288f7eb1c8498608b3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/094299b218ca4445960cfaf4a8559de7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/9466a2aed66b4ff88d7df5fc33bf3554, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/07914cb487874fbc8133fe0ab9c5ac82] to archive 2024-11-20T17:25:22,835 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:25:22,836 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/4c27577e0f33486fa636eddead4f1ac0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/4c27577e0f33486fa636eddead4f1ac0 2024-11-20T17:25:22,838 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/4fff0ab6d10b41b4833f44e07cff126d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/4fff0ab6d10b41b4833f44e07cff126d 2024-11-20T17:25:22,839 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/a4dcfe75a97044fb8eb43c355f12f326 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/a4dcfe75a97044fb8eb43c355f12f326 2024-11-20T17:25:22,840 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/9fd868ddd4254575b1d966632b60bf0a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/9fd868ddd4254575b1d966632b60bf0a 2024-11-20T17:25:22,841 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/628215422a81439988d878ff74403f0a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/628215422a81439988d878ff74403f0a 2024-11-20T17:25:22,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/fcdf7cc649ab4259b43ab11034a39abd to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/fcdf7cc649ab4259b43ab11034a39abd 2024-11-20T17:25:22,843 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/d56726b756e4437997a5a7b60a1408fa to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/d56726b756e4437997a5a7b60a1408fa 2024-11-20T17:25:22,844 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/761b0500b2934750939ce6d70980bfe2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/761b0500b2934750939ce6d70980bfe2 2024-11-20T17:25:22,845 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/22cac85029c549a1bca145b8360f5768 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/22cac85029c549a1bca145b8360f5768 2024-11-20T17:25:22,846 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/6570ee5c8bcc401397330aaab8829af2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/6570ee5c8bcc401397330aaab8829af2 2024-11-20T17:25:22,847 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/225e51ad6fe041b5be7d423d6b996144 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/225e51ad6fe041b5be7d423d6b996144 2024-11-20T17:25:22,848 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/8a09f1bcd8204dc69e618870a30d1bd6 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/8a09f1bcd8204dc69e618870a30d1bd6 2024-11-20T17:25:22,849 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/7665bb49ce554ef6891aaf587658a8a3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/7665bb49ce554ef6891aaf587658a8a3 2024-11-20T17:25:22,850 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/3e259d8286f1458fa5f92390a347f7e8 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/3e259d8286f1458fa5f92390a347f7e8 2024-11-20T17:25:22,851 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/ae77d136da7e4b86ac125cf748fbdaee to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/ae77d136da7e4b86ac125cf748fbdaee 2024-11-20T17:25:22,852 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/773334aad75d462d8140a634acf6282d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/773334aad75d462d8140a634acf6282d 2024-11-20T17:25:22,853 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/7ded997ee735499b8e08fbf93919a1fb to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/7ded997ee735499b8e08fbf93919a1fb 2024-11-20T17:25:22,854 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/002b9d0c4c814b07811a49b6f0b318d4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/002b9d0c4c814b07811a49b6f0b318d4 2024-11-20T17:25:22,855 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/94521670fd674f288f7eb1c8498608b3 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/94521670fd674f288f7eb1c8498608b3 2024-11-20T17:25:22,857 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/094299b218ca4445960cfaf4a8559de7 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/094299b218ca4445960cfaf4a8559de7 2024-11-20T17:25:22,858 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/9466a2aed66b4ff88d7df5fc33bf3554 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/9466a2aed66b4ff88d7df5fc33bf3554 2024-11-20T17:25:22,859 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/07914cb487874fbc8133fe0ab9c5ac82 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/07914cb487874fbc8133fe0ab9c5ac82 2024-11-20T17:25:22,860 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/3c1d560d12c549dbb03b111efd4f3562, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/7f3f21ae53fb460080391ddcceeedb59, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/68cec9e5e2294620ac659cf6df4a9ad4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/48a37ac8e34a429097abe54a5fabf864, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/15e4ce9689c547839ff3d82508f2f2f3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/ac484458eb1e43ccbeb227c54f367c18, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/da866ab17f0745d2a13b4f3470285af1, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/f72b10d431dc41708ed13eb6a10d5c2d, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/1202c0e412d44725b92e4a8b772711a3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/9071b03f6e794b7cb374e7a92423983a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b3b606df97ab42e39b1d7d86c9bf22e4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/6b8753b60e0c4c7ea619e21ecdd729b2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/1f3378f5a73b4c6983ca2d7a51aa4a7c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/24e592c8ac21458eb16047468ca93d41, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b48be762190e4119acaf509be1c7c72c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/4e98c51470cf437bb218f629f8a84e13, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/69874b7c73d440e3a0dd6f23a8d06f06, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/feb4d653d7574243b33b90bd9ad8d559, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b01f3a12544b4d1bab0995bc6a80ff93, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/492396feaed94a7d869fba5450aa5e2c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/c3da8658a69a4322bd9efa20ce4fa796, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/f89a10c2701743348f727b8156191605] to archive 2024-11-20T17:25:22,861 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
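[editor's note] The per-file "Archived from FileableStoreFile" messages above and below follow a fixed pattern: each compacted store file is moved from the region's data directory to the same relative location under archive/. A minimal sketch of that path mapping, using the Hadoop Path API; the root and file names are copied from this log, and the helper is illustrative only, not HBase's actual HFileArchiver implementation:

```java
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
    // Illustrative only: derive the archive location for a store file by
    // re-rooting its path from "<root>/data/..." to "<root>/archive/data/...".
    static Path toArchivePath(Path root, Path storeFile) {
        String relative = storeFile.toString().substring(root.toString().length() + 1);
        return new Path(new Path(root, "archive"), relative);
    }

    public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff");
        Path storeFile = new Path(root,
            "data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/3c1d560d12c549dbb03b111efd4f3562");
        // Prints the same target path that the archiver logs for this file.
        System.out.println(toArchivePath(root, storeFile));
    }
}
```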
2024-11-20T17:25:22,863 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/3c1d560d12c549dbb03b111efd4f3562 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/3c1d560d12c549dbb03b111efd4f3562 2024-11-20T17:25:22,864 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/7f3f21ae53fb460080391ddcceeedb59 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/7f3f21ae53fb460080391ddcceeedb59 2024-11-20T17:25:22,865 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/68cec9e5e2294620ac659cf6df4a9ad4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/68cec9e5e2294620ac659cf6df4a9ad4 2024-11-20T17:25:22,867 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/48a37ac8e34a429097abe54a5fabf864 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/48a37ac8e34a429097abe54a5fabf864 2024-11-20T17:25:22,868 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/15e4ce9689c547839ff3d82508f2f2f3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/15e4ce9689c547839ff3d82508f2f2f3 2024-11-20T17:25:22,869 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/ac484458eb1e43ccbeb227c54f367c18 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/ac484458eb1e43ccbeb227c54f367c18 2024-11-20T17:25:22,870 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/da866ab17f0745d2a13b4f3470285af1 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/da866ab17f0745d2a13b4f3470285af1 2024-11-20T17:25:22,871 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/f72b10d431dc41708ed13eb6a10d5c2d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/f72b10d431dc41708ed13eb6a10d5c2d 2024-11-20T17:25:22,872 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/1202c0e412d44725b92e4a8b772711a3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/1202c0e412d44725b92e4a8b772711a3 2024-11-20T17:25:22,873 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/9071b03f6e794b7cb374e7a92423983a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/9071b03f6e794b7cb374e7a92423983a 2024-11-20T17:25:22,874 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b3b606df97ab42e39b1d7d86c9bf22e4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b3b606df97ab42e39b1d7d86c9bf22e4 2024-11-20T17:25:22,876 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/6b8753b60e0c4c7ea619e21ecdd729b2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/6b8753b60e0c4c7ea619e21ecdd729b2 2024-11-20T17:25:22,877 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/1f3378f5a73b4c6983ca2d7a51aa4a7c to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/1f3378f5a73b4c6983ca2d7a51aa4a7c 2024-11-20T17:25:22,878 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/24e592c8ac21458eb16047468ca93d41 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/24e592c8ac21458eb16047468ca93d41 2024-11-20T17:25:22,879 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b48be762190e4119acaf509be1c7c72c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b48be762190e4119acaf509be1c7c72c 2024-11-20T17:25:22,880 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/4e98c51470cf437bb218f629f8a84e13 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/4e98c51470cf437bb218f629f8a84e13 2024-11-20T17:25:22,881 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/69874b7c73d440e3a0dd6f23a8d06f06 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/69874b7c73d440e3a0dd6f23a8d06f06 2024-11-20T17:25:22,882 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/feb4d653d7574243b33b90bd9ad8d559 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/feb4d653d7574243b33b90bd9ad8d559 2024-11-20T17:25:22,883 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b01f3a12544b4d1bab0995bc6a80ff93 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/b01f3a12544b4d1bab0995bc6a80ff93 2024-11-20T17:25:22,885 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/492396feaed94a7d869fba5450aa5e2c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/492396feaed94a7d869fba5450aa5e2c 2024-11-20T17:25:22,886 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/c3da8658a69a4322bd9efa20ce4fa796 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/c3da8658a69a4322bd9efa20ce4fa796 2024-11-20T17:25:22,887 DEBUG [StoreCloser-TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/f89a10c2701743348f727b8156191605 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/f89a10c2701743348f727b8156191605 2024-11-20T17:25:22,891 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/recovered.edits/343.seqid, newMaxSeqId=343, maxSeqId=4 2024-11-20T17:25:22,892 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232. 
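[editor's note] At this point all store files for families A, B, and C have been archived, the recovered.edits seqid file has been written, and the region is closed. A small sketch of how one could confirm the archived files with the Hadoop FileSystem API; the HDFS URI and directory are taken from the log above, and this listing only works while the test's mini-cluster NameNode on port 40219 is still up:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
    public static void main(String[] args) throws Exception {
        // Paths copied from the log; purely an illustration of inspecting the archive.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40219"), new Configuration());
        Path archivedFamily = new Path("/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/"
            + "archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A");
        // Each entry should correspond to one "Archived from FileableStoreFile" line above.
        for (FileStatus status : fs.listStatus(archivedFamily)) {
            System.out.println(status.getPath().getName() + "\t" + status.getLen());
        }
    }
}
```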
2024-11-20T17:25:22,892 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1635): Region close journal for 51263f54befea8a9195f5613e0910232: 2024-11-20T17:25:22,893 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(170): Closed 51263f54befea8a9195f5613e0910232 2024-11-20T17:25:22,894 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=51263f54befea8a9195f5613e0910232, regionState=CLOSED 2024-11-20T17:25:22,895 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-20T17:25:22,896 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseRegionProcedure 51263f54befea8a9195f5613e0910232, server=d514dc944523,44015,1732123455293 in 1.9020 sec 2024-11-20T17:25:22,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60 2024-11-20T17:25:22,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=51263f54befea8a9195f5613e0910232, UNASSIGN in 1.9050 sec 2024-11-20T17:25:22,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-20T17:25:22,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9080 sec 2024-11-20T17:25:22,899 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123522899"}]},"ts":"1732123522899"} 2024-11-20T17:25:22,900 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T17:25:22,902 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T17:25:22,903 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9220 sec 2024-11-20T17:25:23,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-20T17:25:23,087 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-20T17:25:23,088 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T17:25:23,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:23,089 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:23,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T17:25:23,090 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=63, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:23,091 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,094 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/recovered.edits] 2024-11-20T17:25:23,096 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/26959d57fe8e4a338f580558aae574c4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/26959d57fe8e4a338f580558aae574c4 2024-11-20T17:25:23,098 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/463a73bf1fa2492b8561f531f89cdcb2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/463a73bf1fa2492b8561f531f89cdcb2 2024-11-20T17:25:23,099 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/8d9b4d6fcaae4b878da2b98007bd9a9d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/A/8d9b4d6fcaae4b878da2b98007bd9a9d 2024-11-20T17:25:23,101 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/3b50606b9cb64a5883c262c10455ea27 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/3b50606b9cb64a5883c262c10455ea27 2024-11-20T17:25:23,102 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/507415c105e34c26ad89bdc12f196e8f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/507415c105e34c26ad89bdc12f196e8f 2024-11-20T17:25:23,103 
DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/92d49b007df74f2d9935c968f81a93bc to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/B/92d49b007df74f2d9935c968f81a93bc 2024-11-20T17:25:23,106 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/2d71a341e73d45829e6a1c43c13ee37a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/2d71a341e73d45829e6a1c43c13ee37a 2024-11-20T17:25:23,107 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/3522df28c1b843d79615a1d3db8fa078 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/3522df28c1b843d79615a1d3db8fa078 2024-11-20T17:25:23,108 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/dbf35572d1964d328f98b74683a4c88a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/C/dbf35572d1964d328f98b74683a4c88a 2024-11-20T17:25:23,110 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/recovered.edits/343.seqid to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232/recovered.edits/343.seqid 2024-11-20T17:25:23,111 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,111 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T17:25:23,111 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T17:25:23,112 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T17:25:23,116 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120042dae3fe03b43beb7f08abc0f426823_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120042dae3fe03b43beb7f08abc0f426823_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,117 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112004f4c4442e66413ea017c6200a25c0b3_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112004f4c4442e66413ea017c6200a25c0b3_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,118 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200605816524994a37bb4ad5c9dd7215a1_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200605816524994a37bb4ad5c9dd7215a1_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,119 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112008b97323a64a45f597517b58030b7af1_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112008b97323a64a45f597517b58030b7af1_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,120 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200aa4b6018ed440dc932ee1e21ceaea02_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200aa4b6018ed440dc932ee1e21ceaea02_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,121 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201acf51dbbc7f44e582b60e3ad7608a8c_51263f54befea8a9195f5613e0910232 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201acf51dbbc7f44e582b60e3ad7608a8c_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,122 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112064ac161c81c24716b41f4260ddab96fa_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112064ac161c81c24716b41f4260ddab96fa_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,123 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207a9895b25dc14793911d6de44243004d_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207a9895b25dc14793911d6de44243004d_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,124 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208308505b6e004e258d9947f08e68f450_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208308505b6e004e258d9947f08e68f450_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,125 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112090c9efacc9124325920c57a80b3299c7_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112090c9efacc9124325920c57a80b3299c7_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,126 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a4c8bd1c098948a19079277fdbd33206_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a4c8bd1c098948a19079277fdbd33206_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,127 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120af4e3a4f6c8f4162b36ec636c2bb102f_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120af4e3a4f6c8f4162b36ec636c2bb102f_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,128 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b85a3796cbf94225892ae1cddbd8f257_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b85a3796cbf94225892ae1cddbd8f257_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,130 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bcb519f37c9e47b6988346da737905a3_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bcb519f37c9e47b6988346da737905a3_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,131 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120de83c6e4d7114e109a77488621bb24cc_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120de83c6e4d7114e109a77488621bb24cc_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,132 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f40898354ab043ddbcd935d8fbeac0fa_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f40898354ab043ddbcd935d8fbeac0fa_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,133 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f4494fa88e844b1cbcc971f9daa17e10_51263f54befea8a9195f5613e0910232 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f4494fa88e844b1cbcc971f9daa17e10_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,134 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f9c40b1d91f540c48dedec27ffdd141a_51263f54befea8a9195f5613e0910232 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f9c40b1d91f540c48dedec27ffdd141a_51263f54befea8a9195f5613e0910232 2024-11-20T17:25:23,135 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T17:25:23,137 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=63, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:23,139 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T17:25:23,141 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T17:25:23,142 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=63, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:23,142 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T17:25:23,142 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732123523142"}]},"ts":"9223372036854775807"} 2024-11-20T17:25:23,144 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T17:25:23,144 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 51263f54befea8a9195f5613e0910232, NAME => 'TestAcidGuarantees,,1732123488154.51263f54befea8a9195f5613e0910232.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T17:25:23,144 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-20T17:25:23,144 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732123523144"}]},"ts":"9223372036854775807"} 2024-11-20T17:25:23,146 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T17:25:23,148 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=63, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:23,149 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 60 msec 2024-11-20T17:25:23,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T17:25:23,191 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-20T17:25:23,200 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=237 (was 240), OpenFileDescriptor=444 (was 458), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=258 (was 280), ProcessCount=11 (was 11), AvailableMemoryMB=6210 (was 6309) 2024-11-20T17:25:23,208 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=237, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=258, ProcessCount=11, AvailableMemoryMB=6210 2024-11-20T17:25:23,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-20T17:25:23,210 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:25:23,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:23,212 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:25:23,212 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:23,212 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 64 2024-11-20T17:25:23,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-20T17:25:23,212 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:25:23,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742056_1232 (size=963) 2024-11-20T17:25:23,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-20T17:25:23,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-20T17:25:23,620 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff 2024-11-20T17:25:23,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742057_1233 (size=53) 2024-11-20T17:25:23,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-20T17:25:24,026 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:25:24,026 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 9889f1e232eeda308e27b8e2d47795bd, disabling compactions & flushes 2024-11-20T17:25:24,026 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:24,026 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:24,026 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. after waiting 0 ms 2024-11-20T17:25:24,026 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:24,026 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:24,026 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:24,027 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:25:24,027 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732123524027"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123524027"}]},"ts":"1732123524027"} 2024-11-20T17:25:24,028 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:25:24,029 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:25:24,029 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123524029"}]},"ts":"1732123524029"} 2024-11-20T17:25:24,030 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T17:25:24,034 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9889f1e232eeda308e27b8e2d47795bd, ASSIGN}] 2024-11-20T17:25:24,034 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9889f1e232eeda308e27b8e2d47795bd, ASSIGN 2024-11-20T17:25:24,035 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9889f1e232eeda308e27b8e2d47795bd, ASSIGN; state=OFFLINE, location=d514dc944523,44015,1732123455293; forceNewPlan=false, retain=false 2024-11-20T17:25:24,185 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=9889f1e232eeda308e27b8e2d47795bd, regionState=OPENING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:25:24,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure 9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:25:24,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-20T17:25:24,338 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:24,341 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:24,341 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:25:24,342 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:24,342 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:25:24,342 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:24,342 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:24,343 INFO [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:24,345 INFO [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:25:24,345 INFO [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9889f1e232eeda308e27b8e2d47795bd columnFamilyName A 2024-11-20T17:25:24,345 DEBUG [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:24,345 INFO [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] regionserver.HStore(327): Store=9889f1e232eeda308e27b8e2d47795bd/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:25:24,345 INFO [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:24,346 INFO [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:25:24,347 INFO [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9889f1e232eeda308e27b8e2d47795bd columnFamilyName B 2024-11-20T17:25:24,347 DEBUG [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:24,347 INFO [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] regionserver.HStore(327): Store=9889f1e232eeda308e27b8e2d47795bd/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:25:24,347 INFO [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:24,348 INFO [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:25:24,348 INFO [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9889f1e232eeda308e27b8e2d47795bd columnFamilyName C 2024-11-20T17:25:24,348 DEBUG [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:24,349 INFO [StoreOpener-9889f1e232eeda308e27b8e2d47795bd-1 {}] regionserver.HStore(327): Store=9889f1e232eeda308e27b8e2d47795bd/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:25:24,349 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:24,350 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:24,350 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:24,351 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:25:24,353 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:24,355 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:25:24,355 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 9889f1e232eeda308e27b8e2d47795bd; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59485899, jitterRate=-0.1135910302400589}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:25:24,356 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:24,357 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., pid=66, masterSystemTime=1732123524338 2024-11-20T17:25:24,358 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:24,358 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:24,359 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=9889f1e232eeda308e27b8e2d47795bd, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:25:24,360 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-20T17:25:24,360 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure 9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 in 173 msec 2024-11-20T17:25:24,362 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-20T17:25:24,362 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9889f1e232eeda308e27b8e2d47795bd, ASSIGN in 326 msec 2024-11-20T17:25:24,362 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:25:24,362 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123524362"}]},"ts":"1732123524362"} 2024-11-20T17:25:24,363 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T17:25:24,365 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:25:24,366 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1550 sec 2024-11-20T17:25:25,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-11-20T17:25:25,316 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 64 completed 2024-11-20T17:25:25,318 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72a7721c to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@faa31c 2024-11-20T17:25:25,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@688f4c53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:25:25,323 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:25:25,324 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:25:25,325 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:25:25,326 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60260, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:25:25,328 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7792c763 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22a568ce 2024-11-20T17:25:25,331 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a451d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:25:25,333 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0c8a18c7 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e0e280 2024-11-20T17:25:25,335 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67f02d8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:25:25,336 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x45426917 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@473477dd 2024-11-20T17:25:25,342 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21cebefa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:25:25,343 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e7fc60d to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a91dc80 2024-11-20T17:25:25,346 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e7c8846, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:25:25,347 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e66ea50 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a874cc0 2024-11-20T17:25:25,349 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4093d76e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:25:25,351 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f6119e7 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31178bc2 2024-11-20T17:25:25,353 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2838b88d, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:25:25,354 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7507573f to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78439bc6 2024-11-20T17:25:25,358 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15d2a893, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:25:25,358 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3e5c7476 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a2545d0 2024-11-20T17:25:25,361 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ab3f837, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:25:25,362 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1df84068 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d039dc2 2024-11-20T17:25:25,365 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2834a215, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:25:25,366 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x644774bd to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15db087a 2024-11-20T17:25:25,369 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@187234de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:25:25,373 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:25:25,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-11-20T17:25:25,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T17:25:25,374 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:25:25,375 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=67, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:25:25,375 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:25:25,380 DEBUG [hconnection-0x1a144a97-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:25:25,380 DEBUG [hconnection-0x4e82be12-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:25:25,380 DEBUG [hconnection-0x3168fae0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:25:25,380 DEBUG [hconnection-0x2e96932d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:25:25,381 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:25:25,381 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:25:25,381 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49602, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:25:25,382 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49614, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:25:25,389 DEBUG [hconnection-0xa092da9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:25:25,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:25,389 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:25:25,389 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49628, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:25:25,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:25,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:25,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:25,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:25,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:25,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:25,397 DEBUG 
[hconnection-0x3e0694bc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:25:25,398 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49638, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:25:25,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123585406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123585407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123585407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,418 DEBUG [hconnection-0x724fba4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:25:25,420 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49642, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:25:25,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123585423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/0d8a536fe969488cae799164c7ce2df0 is 50, key is test_row_1/A:col10/1732123525386/Put/seqid=0 2024-11-20T17:25:25,448 DEBUG [hconnection-0x2c4e044e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:25:25,450 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49644, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:25:25,457 DEBUG [hconnection-0x3875d84e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:25:25,459 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49654, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:25:25,460 DEBUG [hconnection-0x4e239f4c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:25:25,461 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49662, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:25:25,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742058_1234 (size=9657) 2024-11-20T17:25:25,463 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), 
to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/0d8a536fe969488cae799164c7ce2df0 2024-11-20T17:25:25,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123585464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T17:25:25,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/9ecd303d61a04429a7127b1e9910d8ac is 50, key is test_row_1/B:col10/1732123525386/Put/seqid=0 2024-11-20T17:25:25,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123585508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123585508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123585510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742059_1235 (size=9657) 2024-11-20T17:25:25,526 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:25,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T17:25:25,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:25,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:25,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:25,527 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:25,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:25,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:25,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123585529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123585567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T17:25:25,680 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:25,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T17:25:25,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:25,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:25,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:25,681 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:25,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:25,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123585711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123585711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123585713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,732 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123585731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:25,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123585769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:25,833 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:25,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T17:25:25,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:25,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:25,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:25,834 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:25,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:25,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:25,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/9ecd303d61a04429a7127b1e9910d8ac 2024-11-20T17:25:25,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/0a23ebb35f4f4b528958717712cf93cb is 50, key is test_row_1/C:col10/1732123525386/Put/seqid=0 2024-11-20T17:25:25,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742060_1236 (size=9657) 2024-11-20T17:25:25,960 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/0a23ebb35f4f4b528958717712cf93cb 2024-11-20T17:25:25,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T17:25:25,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/0d8a536fe969488cae799164c7ce2df0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/0d8a536fe969488cae799164c7ce2df0 2024-11-20T17:25:25,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/0d8a536fe969488cae799164c7ce2df0, entries=100, sequenceid=12, filesize=9.4 K 2024-11-20T17:25:25,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/9ecd303d61a04429a7127b1e9910d8ac as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/9ecd303d61a04429a7127b1e9910d8ac 2024-11-20T17:25:25,986 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:25,986 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T17:25:25,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:25,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:25,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:25,987 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:25,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:25,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:25,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/9ecd303d61a04429a7127b1e9910d8ac, entries=100, sequenceid=12, filesize=9.4 K 2024-11-20T17:25:25,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/0a23ebb35f4f4b528958717712cf93cb as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/0a23ebb35f4f4b528958717712cf93cb 2024-11-20T17:25:26,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/0a23ebb35f4f4b528958717712cf93cb, entries=100, sequenceid=12, filesize=9.4 K 2024-11-20T17:25:26,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 9889f1e232eeda308e27b8e2d47795bd in 612ms, sequenceid=12, compaction requested=false 2024-11-20T17:25:26,001 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T17:25:26,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:26,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:26,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T17:25:26,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:26,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:26,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:26,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:26,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:26,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:26,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123586019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/592598c9d6184ade8195b96cc0340da3 is 50, key is test_row_0/A:col10/1732123525401/Put/seqid=0 2024-11-20T17:25:26,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123586021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123586024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123586033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742061_1237 (size=14341) 2024-11-20T17:25:26,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123586071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123586122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123586124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123586125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,139 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:26,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T17:25:26,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:26,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:26,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:26,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:26,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:26,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:26,292 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:26,292 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T17:25:26,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:26,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:26,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:26,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:26,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:26,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:26,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123586324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123586340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123586340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/592598c9d6184ade8195b96cc0340da3 2024-11-20T17:25:26,445 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:26,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T17:25:26,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:26,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:26,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:26,446 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:26,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:26,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:26,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/559ede58e3474cec9ceb436c40225acb is 50, key is test_row_0/B:col10/1732123525401/Put/seqid=0 2024-11-20T17:25:26,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742062_1238 (size=12001) 2024-11-20T17:25:26,458 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/559ede58e3474cec9ceb436c40225acb 2024-11-20T17:25:26,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/d3abd3236dae46e386c614afd920498c is 50, key is test_row_0/C:col10/1732123525401/Put/seqid=0 2024-11-20T17:25:26,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742063_1239 (size=12001) 2024-11-20T17:25:26,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T17:25:26,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123586539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123586577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,598 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:26,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T17:25:26,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:26,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:26,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:26,599 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:26,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:26,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:26,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123586626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123586645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:26,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123586645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:26,752 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:26,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T17:25:26,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:26,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:26,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:26,753 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:26,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:26,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:26,873 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/d3abd3236dae46e386c614afd920498c 2024-11-20T17:25:26,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/592598c9d6184ade8195b96cc0340da3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/592598c9d6184ade8195b96cc0340da3 2024-11-20T17:25:26,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/592598c9d6184ade8195b96cc0340da3, entries=200, sequenceid=41, filesize=14.0 K 2024-11-20T17:25:26,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/559ede58e3474cec9ceb436c40225acb as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/559ede58e3474cec9ceb436c40225acb 2024-11-20T17:25:26,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/559ede58e3474cec9ceb436c40225acb, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T17:25:26,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/d3abd3236dae46e386c614afd920498c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d3abd3236dae46e386c614afd920498c 2024-11-20T17:25:26,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d3abd3236dae46e386c614afd920498c, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T17:25:26,900 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 9889f1e232eeda308e27b8e2d47795bd in 884ms, sequenceid=41, compaction requested=false 2024-11-20T17:25:26,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:26,905 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:26,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=68 2024-11-20T17:25:26,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:26,906 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T17:25:26,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:26,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:26,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:26,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:26,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:26,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:26,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/cd5cceaf177942eeab87eb6c63ea3e34 is 50, key is test_row_0/A:col10/1732123526020/Put/seqid=0 2024-11-20T17:25:26,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742064_1240 (size=9657) 2024-11-20T17:25:27,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:27,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:27,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123587161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123587162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123587163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123587264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123587264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123587265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,330 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/cd5cceaf177942eeab87eb6c63ea3e34 2024-11-20T17:25:27,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/bbeff0465780416087afd47d69c28c1b is 50, key is test_row_0/B:col10/1732123526020/Put/seqid=0 2024-11-20T17:25:27,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742065_1241 (size=9657) 2024-11-20T17:25:27,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123587467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123587467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123587467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T17:25:27,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123587549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123587579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,744 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/bbeff0465780416087afd47d69c28c1b 2024-11-20T17:25:27,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/d083acd754c34e5bb0e2231749b1c894 is 50, key is test_row_0/C:col10/1732123526020/Put/seqid=0 2024-11-20T17:25:27,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742066_1242 (size=9657) 2024-11-20T17:25:27,762 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/d083acd754c34e5bb0e2231749b1c894 2024-11-20T17:25:27,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/cd5cceaf177942eeab87eb6c63ea3e34 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/cd5cceaf177942eeab87eb6c63ea3e34 2024-11-20T17:25:27,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123587771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123587771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,773 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/cd5cceaf177942eeab87eb6c63ea3e34, entries=100, sequenceid=49, filesize=9.4 K 2024-11-20T17:25:27,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:27,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123587772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:27,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/bbeff0465780416087afd47d69c28c1b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/bbeff0465780416087afd47d69c28c1b 2024-11-20T17:25:27,778 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/bbeff0465780416087afd47d69c28c1b, entries=100, sequenceid=49, filesize=9.4 K 2024-11-20T17:25:27,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/d083acd754c34e5bb0e2231749b1c894 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d083acd754c34e5bb0e2231749b1c894 2024-11-20T17:25:27,784 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d083acd754c34e5bb0e2231749b1c894, entries=100, sequenceid=49, filesize=9.4 K 2024-11-20T17:25:27,785 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 9889f1e232eeda308e27b8e2d47795bd in 880ms, sequenceid=49, compaction requested=true 2024-11-20T17:25:27,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:27,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region 
operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:27,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-20T17:25:27,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-11-20T17:25:27,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-20T17:25:27,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4110 sec 2024-11-20T17:25:27,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 2.4150 sec 2024-11-20T17:25:28,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:28,276 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T17:25:28,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:28,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:28,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:28,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:28,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:28,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:28,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:28,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123588279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:28,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:28,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:28,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123588280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:28,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123588280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:28,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/78518f8b580349888c9b07ba054619ec is 50, key is test_row_0/A:col10/1732123528276/Put/seqid=0 2024-11-20T17:25:28,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742067_1243 (size=12001) 2024-11-20T17:25:28,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:28,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123588382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:28,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:28,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123588382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:28,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:28,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123588383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:28,419 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T17:25:28,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:28,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123588584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:28,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:28,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123588585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:28,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:28,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123588586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:28,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/78518f8b580349888c9b07ba054619ec 2024-11-20T17:25:28,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/79bb65a86de94ca79dfdc917be201b1e is 50, key is test_row_0/B:col10/1732123528276/Put/seqid=0 2024-11-20T17:25:28,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742068_1244 (size=12001) 2024-11-20T17:25:28,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:28,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123588888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:28,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:28,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123588889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:28,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:28,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123588889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:29,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/79bb65a86de94ca79dfdc917be201b1e 2024-11-20T17:25:29,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/bafca3295bd84d3fa21b4bb5dcca4166 is 50, key is test_row_0/C:col10/1732123528276/Put/seqid=0 2024-11-20T17:25:29,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742069_1245 (size=12001) 2024-11-20T17:25:29,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:29,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123589392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:29,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:29,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123589392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:29,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:29,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123589393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:29,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T17:25:29,479 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-20T17:25:29,481 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:25:29,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-20T17:25:29,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T17:25:29,483 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:25:29,483 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:25:29,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:25:29,521 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/bafca3295bd84d3fa21b4bb5dcca4166 2024-11-20T17:25:29,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/78518f8b580349888c9b07ba054619ec as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/78518f8b580349888c9b07ba054619ec 2024-11-20T17:25:29,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/78518f8b580349888c9b07ba054619ec, entries=150, sequenceid=79, filesize=11.7 K 2024-11-20T17:25:29,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/79bb65a86de94ca79dfdc917be201b1e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/79bb65a86de94ca79dfdc917be201b1e 2024-11-20T17:25:29,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/79bb65a86de94ca79dfdc917be201b1e, entries=150, sequenceid=79, filesize=11.7 K 2024-11-20T17:25:29,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/bafca3295bd84d3fa21b4bb5dcca4166 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/bafca3295bd84d3fa21b4bb5dcca4166 2024-11-20T17:25:29,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/bafca3295bd84d3fa21b4bb5dcca4166, entries=150, sequenceid=79, filesize=11.7 K 2024-11-20T17:25:29,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 9889f1e232eeda308e27b8e2d47795bd in 1265ms, sequenceid=79, compaction requested=true 2024-11-20T17:25:29,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:29,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:25:29,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:29,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:25:29,542 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 
blocking 2024-11-20T17:25:29,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:29,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:25:29,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:25:29,542 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:25:29,543 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45656 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:25:29,543 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/A is initiating minor compaction (all files) 2024-11-20T17:25:29,543 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/A in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:29,543 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/0d8a536fe969488cae799164c7ce2df0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/592598c9d6184ade8195b96cc0340da3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/cd5cceaf177942eeab87eb6c63ea3e34, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/78518f8b580349888c9b07ba054619ec] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=44.6 K 2024-11-20T17:25:29,544 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 43316 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:25:29,544 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/B is initiating minor compaction (all files) 2024-11-20T17:25:29,544 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/B in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
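The HRegionFileSystem(442) "Committing ....tmp/A/78518f8b580349888c9b07ba054619ec as .../A/78518f8b580349888c9b07ba054619ec" entries above record the flush commit step: each new store file is first written under the region's .tmp directory and only then renamed into the column-family directory, so the store directory never contains half-written files. A minimal sketch of that write-to-.tmp-then-rename pattern using only the plain Hadoop FileSystem API; the namenode address is the one in the log, but every path below is invented purely for illustration:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Namenode address as it appears in the log; directories below are illustrative only.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40219"), conf);
        Path tmp = new Path("/user/jenkins/demo/.tmp/A/example-storefile");
        Path dst = new Path("/user/jenkins/demo/A/example-storefile");
        // 1. Write the new file somewhere readers never look.
        try (FSDataOutputStream out = fs.create(tmp)) {
          out.writeBytes("flushed cells would be written here");
        }
        // 2. "Commit" it with a rename, so only complete files ever appear in the store directory.
        fs.mkdirs(dst.getParent());
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }
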
2024-11-20T17:25:29,544 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/9ecd303d61a04429a7127b1e9910d8ac, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/559ede58e3474cec9ceb436c40225acb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/bbeff0465780416087afd47d69c28c1b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/79bb65a86de94ca79dfdc917be201b1e] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=42.3 K 2024-11-20T17:25:29,544 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d8a536fe969488cae799164c7ce2df0, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732123525386 2024-11-20T17:25:29,545 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ecd303d61a04429a7127b1e9910d8ac, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732123525386 2024-11-20T17:25:29,545 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 592598c9d6184ade8195b96cc0340da3, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732123525401 2024-11-20T17:25:29,545 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 559ede58e3474cec9ceb436c40225acb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732123525401 2024-11-20T17:25:29,545 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd5cceaf177942eeab87eb6c63ea3e34, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732123526020 2024-11-20T17:25:29,546 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting bbeff0465780416087afd47d69c28c1b, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732123526020 2024-11-20T17:25:29,546 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 79bb65a86de94ca79dfdc917be201b1e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732123527158 2024-11-20T17:25:29,546 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78518f8b580349888c9b07ba054619ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732123527158 2024-11-20T17:25:29,556 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#B#compaction#201 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:29,557 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/ea7f199cbece42f78a1059cc39518047 is 50, key is test_row_0/B:col10/1732123528276/Put/seqid=0 2024-11-20T17:25:29,558 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#A#compaction#202 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:29,560 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/8adfba8509964ebbae77a78c61ad0ceb is 50, key is test_row_0/A:col10/1732123528276/Put/seqid=0 2024-11-20T17:25:29,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:29,577 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:25:29,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:29,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:29,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:29,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:29,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:29,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:29,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742070_1246 (size=12139) 2024-11-20T17:25:29,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T17:25:29,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/b8cc4297fabf4993a2e4b5d5236e9594 is 50, key is test_row_0/A:col10/1732123529575/Put/seqid=0 2024-11-20T17:25:29,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742071_1247 (size=12139) 2024-11-20T17:25:29,595 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/8adfba8509964ebbae77a78c61ad0ceb as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8adfba8509964ebbae77a78c61ad0ceb 2024-11-20T17:25:29,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742072_1248 (size=12001) 2024-11-20T17:25:29,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/b8cc4297fabf4993a2e4b5d5236e9594 2024-11-20T17:25:29,608 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/A of 9889f1e232eeda308e27b8e2d47795bd into 8adfba8509964ebbae77a78c61ad0ceb(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:29,608 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:29,608 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/A, priority=12, startTime=1732123529542; duration=0sec 2024-11-20T17:25:29,608 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:29,609 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:A 2024-11-20T17:25:29,609 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:25:29,610 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 43316 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:25:29,610 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/C is initiating minor compaction (all files) 2024-11-20T17:25:29,610 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/C in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
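The shortCompactions thread above has just merged the four A-family files into 8adfba8509964ebbae77a78c61ad0ceb and is now selecting the C-family files; these minor compactions were queued automatically by MemStoreFlusher.0 after the flush. A compaction can also be requested explicitly from the client through the Admin API. A hedged sketch (the table name is the one in the log; nothing else is taken from the test itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Ask the region servers to run a (minor) compaction on every region of the table.
          admin.compact(table);
          // A major compaction, which rewrites all store files in each store, is requested the same way:
          // admin.majorCompact(table);
        }
      }
    }
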
2024-11-20T17:25:29,610 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/0a23ebb35f4f4b528958717712cf93cb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d3abd3236dae46e386c614afd920498c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d083acd754c34e5bb0e2231749b1c894, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/bafca3295bd84d3fa21b4bb5dcca4166] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=42.3 K 2024-11-20T17:25:29,611 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a23ebb35f4f4b528958717712cf93cb, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732123525386 2024-11-20T17:25:29,612 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3abd3236dae46e386c614afd920498c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732123525401 2024-11-20T17:25:29,612 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting d083acd754c34e5bb0e2231749b1c894, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732123526020 2024-11-20T17:25:29,612 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting bafca3295bd84d3fa21b4bb5dcca4166, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732123527158 2024-11-20T17:25:29,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/49b6eb8059ce49efa18fa72ada98efd7 is 50, key is test_row_0/B:col10/1732123529575/Put/seqid=0 2024-11-20T17:25:29,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742073_1249 (size=12001) 2024-11-20T17:25:29,625 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#C#compaction#205 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:29,625 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/a5e5332abc554491a833f700e9afa2cf is 50, key is test_row_0/C:col10/1732123528276/Put/seqid=0 2024-11-20T17:25:29,626 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/49b6eb8059ce49efa18fa72ada98efd7 2024-11-20T17:25:29,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742074_1250 (size=12139) 2024-11-20T17:25:29,635 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:29,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T17:25:29,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:29,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:29,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:29,636 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
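pid=70 above is the FlushRegionProcedure spawned as a subprocedure of the FlushTableProcedure (pid=69) that the client requested at 17:25:29,481; the callable fails with "Unable to complete flush" because MemStoreFlusher.0 is already flushing the region ("NOT flushing ... as already flushing"), and the master dispatches the same pid=70 again further down. On the client side that whole procedure chain starts from a single Admin.flush call. A minimal sketch, assuming a default client configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Waits for the master-side flush procedure to complete
          // (compare the HBaseAdmin$TableFuture "Operation: FLUSH ... completed" line above).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
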
2024-11-20T17:25:29,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:29,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:29,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/2258778756f74c679ce8139f669bb4d3 is 50, key is test_row_0/C:col10/1732123529575/Put/seqid=0 2024-11-20T17:25:29,643 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/a5e5332abc554491a833f700e9afa2cf as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/a5e5332abc554491a833f700e9afa2cf 2024-11-20T17:25:29,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742075_1251 (size=12001) 2024-11-20T17:25:29,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:29,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123589646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:29,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:29,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123589649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:29,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/2258778756f74c679ce8139f669bb4d3 2024-11-20T17:25:29,660 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/C of 9889f1e232eeda308e27b8e2d47795bd into a5e5332abc554491a833f700e9afa2cf(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
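The repeated "RegionTooBusyException: Over memstore limit=512.0 K" entries show the region server rejecting Mutate calls while the region's memstore is above its blocking limit; writes are accepted again once flushes bring the memstore back under that limit. A hedged sketch of what a writer hitting this push-back looks like, with an explicit retry-with-backoff loop (row, family and qualifier mirror the keys in the log; in practice the stock HBase client retries these calls internally, so the exception normally surfaces only after its own retries are exhausted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoffSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);  // rejected with RegionTooBusyException while the memstore is over its blocking limit
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e;                 // give up after a few attempts
              }
              Thread.sleep(backoffMs);   // wait for the flush to drain the memstore, then retry
              backoffMs *= 2;
            }
          }
        }
      }
    }
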
2024-11-20T17:25:29,660 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:29,660 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/C, priority=12, startTime=1732123529542; duration=0sec 2024-11-20T17:25:29,660 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:29,660 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:C 2024-11-20T17:25:29,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/b8cc4297fabf4993a2e4b5d5236e9594 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/b8cc4297fabf4993a2e4b5d5236e9594 2024-11-20T17:25:29,666 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/b8cc4297fabf4993a2e4b5d5236e9594, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T17:25:29,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/49b6eb8059ce49efa18fa72ada98efd7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/49b6eb8059ce49efa18fa72ada98efd7 2024-11-20T17:25:29,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/49b6eb8059ce49efa18fa72ada98efd7, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T17:25:29,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/2258778756f74c679ce8139f669bb4d3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/2258778756f74c679ce8139f669bb4d3 2024-11-20T17:25:29,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/2258778756f74c679ce8139f669bb4d3, entries=150, sequenceid=90, filesize=11.7 K 2024-11-20T17:25:29,675 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9889f1e232eeda308e27b8e2d47795bd in 99ms, sequenceid=90, 
compaction requested=false 2024-11-20T17:25:29,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:29,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:29,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:25:29,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:29,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:29,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:29,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:29,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:29,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:29,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/4567f8464d47411ca345360d092ef658 is 50, key is test_row_0/A:col10/1732123529751/Put/seqid=0 2024-11-20T17:25:29,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742076_1252 (size=14341) 2024-11-20T17:25:29,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:29,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123589763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:29,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:29,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123589764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:29,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T17:25:29,788 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:29,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T17:25:29,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:29,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:29,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:29,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:29,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:29,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:29,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:29,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123589866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:29,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:29,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123589867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:29,941 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:29,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T17:25:29,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:29,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:29,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:29,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:29,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:29,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:29,987 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/ea7f199cbece42f78a1059cc39518047 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/ea7f199cbece42f78a1059cc39518047 2024-11-20T17:25:29,993 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/B of 9889f1e232eeda308e27b8e2d47795bd into ea7f199cbece42f78a1059cc39518047(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:29,993 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:29,993 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/B, priority=12, startTime=1732123529542; duration=0sec 2024-11-20T17:25:29,993 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:29,993 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:B 2024-11-20T17:25:30,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:30,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123590070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:30,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:30,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123590071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:30,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T17:25:30,096 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:30,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T17:25:30,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:30,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:30,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:30,097 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:30,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,164 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/4567f8464d47411ca345360d092ef658 2024-11-20T17:25:30,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/6f95f9d93dcb44d4a71dd260e8f1cd4e is 50, key is test_row_0/B:col10/1732123529751/Put/seqid=0 2024-11-20T17:25:30,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742077_1253 (size=12001) 2024-11-20T17:25:30,250 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:30,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T17:25:30,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:30,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:30,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:30,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:30,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:30,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123590373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:30,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:30,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123590374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:30,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123590396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:30,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123590401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:30,403 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:30,403 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T17:25:30,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:30,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:30,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:30,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123590402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:30,403 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,555 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:30,556 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T17:25:30,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:30,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
as already flushing 2024-11-20T17:25:30,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:30,556 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/6f95f9d93dcb44d4a71dd260e8f1cd4e 2024-11-20T17:25:30,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T17:25:30,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/46b1ea963a8147c09f27a471e07cd7b6 is 50, key is test_row_0/C:col10/1732123529751/Put/seqid=0 2024-11-20T17:25:30,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742078_1254 (size=12001) 2024-11-20T17:25:30,709 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:30,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T17:25:30,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:30,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:30,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:30,710 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:30,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,862 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:30,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T17:25:30,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:30,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
as already flushing 2024-11-20T17:25:30,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:30,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:30,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:30,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123590876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:30,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:30,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123590877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:30,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/46b1ea963a8147c09f27a471e07cd7b6 2024-11-20T17:25:31,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/4567f8464d47411ca345360d092ef658 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/4567f8464d47411ca345360d092ef658 2024-11-20T17:25:31,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/4567f8464d47411ca345360d092ef658, entries=200, sequenceid=118, filesize=14.0 K 2024-11-20T17:25:31,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/6f95f9d93dcb44d4a71dd260e8f1cd4e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/6f95f9d93dcb44d4a71dd260e8f1cd4e 2024-11-20T17:25:31,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/6f95f9d93dcb44d4a71dd260e8f1cd4e, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T17:25:31,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/46b1ea963a8147c09f27a471e07cd7b6 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/46b1ea963a8147c09f27a471e07cd7b6 2024-11-20T17:25:31,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/46b1ea963a8147c09f27a471e07cd7b6, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T17:25:31,015 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:31,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T17:25:31,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:31,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:31,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:31,016 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:31,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:31,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 9889f1e232eeda308e27b8e2d47795bd in 1263ms, sequenceid=118, compaction requested=true 2024-11-20T17:25:31,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:31,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:25:31,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:31,016 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:31,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:25:31,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:31,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:25:31,016 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:31,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:31,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:31,017 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:31,017 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:31,018 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/B is initiating minor compaction (all files) 2024-11-20T17:25:31,018 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/A is initiating minor compaction (all files) 2024-11-20T17:25:31,018 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/B in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:31,018 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/A in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:31,018 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/ea7f199cbece42f78a1059cc39518047, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/49b6eb8059ce49efa18fa72ada98efd7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/6f95f9d93dcb44d4a71dd260e8f1cd4e] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=35.3 K 2024-11-20T17:25:31,018 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8adfba8509964ebbae77a78c61ad0ceb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/b8cc4297fabf4993a2e4b5d5236e9594, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/4567f8464d47411ca345360d092ef658] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=37.6 K 2024-11-20T17:25:31,018 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting ea7f199cbece42f78a1059cc39518047, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732123527158 2024-11-20T17:25:31,018 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8adfba8509964ebbae77a78c61ad0ceb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732123527158 2024-11-20T17:25:31,019 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8cc4297fabf4993a2e4b5d5236e9594, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732123529570 2024-11-20T17:25:31,019 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 49b6eb8059ce49efa18fa72ada98efd7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732123529570 2024-11-20T17:25:31,019 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4567f8464d47411ca345360d092ef658, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732123529644 2024-11-20T17:25:31,019 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f95f9d93dcb44d4a71dd260e8f1cd4e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732123529645 2024-11-20T17:25:31,028 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#A#compaction#210 average throughput is 3.28 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:31,028 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/538d014203874a1ab2b46746aa83df78 is 50, key is test_row_0/A:col10/1732123529751/Put/seqid=0 2024-11-20T17:25:31,032 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#B#compaction#211 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:31,032 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/7bb9bf7f86114efb8b7afd3021240577 is 50, key is test_row_0/B:col10/1732123529751/Put/seqid=0 2024-11-20T17:25:31,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742079_1255 (size=12241) 2024-11-20T17:25:31,049 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/538d014203874a1ab2b46746aa83df78 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/538d014203874a1ab2b46746aa83df78 2024-11-20T17:25:31,056 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/A of 9889f1e232eeda308e27b8e2d47795bd into 538d014203874a1ab2b46746aa83df78(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:31,056 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:31,056 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/A, priority=13, startTime=1732123531016; duration=0sec 2024-11-20T17:25:31,056 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:31,057 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:A 2024-11-20T17:25:31,057 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:31,058 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:31,058 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/C is initiating minor compaction (all files) 2024-11-20T17:25:31,058 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/C in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:31,058 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/a5e5332abc554491a833f700e9afa2cf, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/2258778756f74c679ce8139f669bb4d3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/46b1ea963a8147c09f27a471e07cd7b6] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=35.3 K 2024-11-20T17:25:31,058 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5e5332abc554491a833f700e9afa2cf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732123527158 2024-11-20T17:25:31,059 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2258778756f74c679ce8139f669bb4d3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732123529570 2024-11-20T17:25:31,060 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46b1ea963a8147c09f27a471e07cd7b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732123529645 2024-11-20T17:25:31,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36709 is added to blk_1073742080_1256 (size=12241) 2024-11-20T17:25:31,070 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#C#compaction#212 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:31,071 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/4d3c52eb00ea4afe9c587b90632588f0 is 50, key is test_row_0/C:col10/1732123529751/Put/seqid=0 2024-11-20T17:25:31,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742081_1257 (size=12241) 2024-11-20T17:25:31,084 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/4d3c52eb00ea4afe9c587b90632588f0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/4d3c52eb00ea4afe9c587b90632588f0 2024-11-20T17:25:31,090 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/C of 9889f1e232eeda308e27b8e2d47795bd into 4d3c52eb00ea4afe9c587b90632588f0(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:31,090 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:31,090 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/C, priority=13, startTime=1732123531016; duration=0sec 2024-11-20T17:25:31,090 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:31,090 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:C 2024-11-20T17:25:31,168 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:31,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T17:25:31,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:31,169 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T17:25:31,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:31,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:31,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:31,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:31,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:31,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:31,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/888a2e970ea3401d873021539c0cbc52 is 50, key is test_row_0/A:col10/1732123529757/Put/seqid=0 2024-11-20T17:25:31,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742082_1258 (size=12001) 2024-11-20T17:25:31,472 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/7bb9bf7f86114efb8b7afd3021240577 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/7bb9bf7f86114efb8b7afd3021240577 2024-11-20T17:25:31,478 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/B of 9889f1e232eeda308e27b8e2d47795bd into 7bb9bf7f86114efb8b7afd3021240577(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:31,478 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:31,478 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/B, priority=13, startTime=1732123531016; duration=0sec 2024-11-20T17:25:31,478 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:31,478 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:B 2024-11-20T17:25:31,584 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/888a2e970ea3401d873021539c0cbc52 2024-11-20T17:25:31,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T17:25:31,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/462d5023b324466297da5c51cb6f4c20 is 50, key is test_row_0/B:col10/1732123529757/Put/seqid=0 2024-11-20T17:25:31,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742083_1259 (size=12001) 2024-11-20T17:25:31,610 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/462d5023b324466297da5c51cb6f4c20 2024-11-20T17:25:31,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/823991bfd679429d99d4ddedf9ee33ed is 50, key is test_row_0/C:col10/1732123529757/Put/seqid=0 2024-11-20T17:25:31,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742084_1260 (size=12001) 2024-11-20T17:25:31,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:31,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
as already flushing 2024-11-20T17:25:31,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:31,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123591930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:31,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:31,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123591931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,028 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/823991bfd679429d99d4ddedf9ee33ed 2024-11-20T17:25:32,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/888a2e970ea3401d873021539c0cbc52 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/888a2e970ea3401d873021539c0cbc52 2024-11-20T17:25:32,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123592033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123592033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,037 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/888a2e970ea3401d873021539c0cbc52, entries=150, sequenceid=131, filesize=11.7 K 2024-11-20T17:25:32,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/462d5023b324466297da5c51cb6f4c20 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/462d5023b324466297da5c51cb6f4c20 2024-11-20T17:25:32,042 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/462d5023b324466297da5c51cb6f4c20, entries=150, sequenceid=131, filesize=11.7 K 2024-11-20T17:25:32,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/823991bfd679429d99d4ddedf9ee33ed as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/823991bfd679429d99d4ddedf9ee33ed 2024-11-20T17:25:32,046 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/823991bfd679429d99d4ddedf9ee33ed, entries=150, sequenceid=131, filesize=11.7 K 2024-11-20T17:25:32,047 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 9889f1e232eeda308e27b8e2d47795bd in 878ms, sequenceid=131, 
compaction requested=false 2024-11-20T17:25:32,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:32,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:32,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-20T17:25:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-20T17:25:32,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-20T17:25:32,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5640 sec 2024-11-20T17:25:32,052 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 2.5700 sec 2024-11-20T17:25:32,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:32,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T17:25:32,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:32,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:32,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:32,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:32,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:32,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:32,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/8e0f283886c944b49cd5afcbe2cdffdc is 50, key is test_row_0/A:col10/1732123531930/Put/seqid=0 2024-11-20T17:25:32,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123592244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123592245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742085_1261 (size=12151) 2024-11-20T17:25:32,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123592346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123592347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123592412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123592412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,414 DEBUG [Thread-1080 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4134 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., hostname=d514dc944523,44015,1732123455293, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:32,414 DEBUG [Thread-1082 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., hostname=d514dc944523,44015,1732123455293, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at 
org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:32,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123592416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,417 DEBUG [Thread-1088 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., hostname=d514dc944523,44015,1732123455293, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:32,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123592548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123592548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/8e0f283886c944b49cd5afcbe2cdffdc 2024-11-20T17:25:32,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/4af8db9a466f4806a4a1ba5da28f7a19 is 50, key is test_row_0/B:col10/1732123531930/Put/seqid=0 2024-11-20T17:25:32,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742086_1262 (size=12151) 2024-11-20T17:25:32,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:32,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123592853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:32,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123592853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:33,068 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/4af8db9a466f4806a4a1ba5da28f7a19 2024-11-20T17:25:33,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/998f57c729f846db826424f5b440be63 is 50, key is 
test_row_0/C:col10/1732123531930/Put/seqid=0 2024-11-20T17:25:33,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742087_1263 (size=12151) 2024-11-20T17:25:33,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/998f57c729f846db826424f5b440be63 2024-11-20T17:25:33,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/8e0f283886c944b49cd5afcbe2cdffdc as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8e0f283886c944b49cd5afcbe2cdffdc 2024-11-20T17:25:33,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8e0f283886c944b49cd5afcbe2cdffdc, entries=150, sequenceid=161, filesize=11.9 K 2024-11-20T17:25:33,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/4af8db9a466f4806a4a1ba5da28f7a19 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/4af8db9a466f4806a4a1ba5da28f7a19 2024-11-20T17:25:33,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/4af8db9a466f4806a4a1ba5da28f7a19, entries=150, sequenceid=161, filesize=11.9 K 2024-11-20T17:25:33,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/998f57c729f846db826424f5b440be63 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/998f57c729f846db826424f5b440be63 2024-11-20T17:25:33,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/998f57c729f846db826424f5b440be63, entries=150, sequenceid=161, filesize=11.9 K 2024-11-20T17:25:33,102 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 9889f1e232eeda308e27b8e2d47795bd in 864ms, sequenceid=161, compaction requested=true 2024-11-20T17:25:33,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:33,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:A, 
priority=-2147483648, current under compaction store size is 1 2024-11-20T17:25:33,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:33,102 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:33,102 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:33,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:25:33,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:33,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:25:33,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:33,103 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:33,103 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:33,103 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/A is initiating minor compaction (all files) 2024-11-20T17:25:33,103 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/B is initiating minor compaction (all files) 2024-11-20T17:25:33,103 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/A in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:33,103 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/B in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:33,103 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/538d014203874a1ab2b46746aa83df78, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/888a2e970ea3401d873021539c0cbc52, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8e0f283886c944b49cd5afcbe2cdffdc] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=35.5 K 2024-11-20T17:25:33,103 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/7bb9bf7f86114efb8b7afd3021240577, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/462d5023b324466297da5c51cb6f4c20, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/4af8db9a466f4806a4a1ba5da28f7a19] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=35.5 K 2024-11-20T17:25:33,104 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 538d014203874a1ab2b46746aa83df78, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732123529645 2024-11-20T17:25:33,104 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bb9bf7f86114efb8b7afd3021240577, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732123529645 2024-11-20T17:25:33,104 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 888a2e970ea3401d873021539c0cbc52, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123529757 2024-11-20T17:25:33,104 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 462d5023b324466297da5c51cb6f4c20, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123529757 2024-11-20T17:25:33,105 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e0f283886c944b49cd5afcbe2cdffdc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732123531930 2024-11-20T17:25:33,105 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 4af8db9a466f4806a4a1ba5da28f7a19, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732123531930 2024-11-20T17:25:33,114 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#A#compaction#219 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:33,114 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/af7a5a6d387e43fb989968e90cd6288e is 50, key is test_row_0/A:col10/1732123531930/Put/seqid=0 2024-11-20T17:25:33,117 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#B#compaction#220 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:33,117 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/7fb0cdd817cc468c8cd595e21aa63bef is 50, key is test_row_0/B:col10/1732123531930/Put/seqid=0 2024-11-20T17:25:33,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742088_1264 (size=12493) 2024-11-20T17:25:33,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742089_1265 (size=12493) 2024-11-20T17:25:33,124 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/af7a5a6d387e43fb989968e90cd6288e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/af7a5a6d387e43fb989968e90cd6288e 2024-11-20T17:25:33,131 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/7fb0cdd817cc468c8cd595e21aa63bef as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/7fb0cdd817cc468c8cd595e21aa63bef 2024-11-20T17:25:33,131 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/A of 9889f1e232eeda308e27b8e2d47795bd into af7a5a6d387e43fb989968e90cd6288e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:33,131 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:33,131 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/A, priority=13, startTime=1732123533102; duration=0sec 2024-11-20T17:25:33,131 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:33,131 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:A 2024-11-20T17:25:33,131 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:33,132 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:33,132 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/C is initiating minor compaction (all files) 2024-11-20T17:25:33,133 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/C in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:33,133 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/4d3c52eb00ea4afe9c587b90632588f0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/823991bfd679429d99d4ddedf9ee33ed, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/998f57c729f846db826424f5b440be63] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=35.5 K 2024-11-20T17:25:33,133 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d3c52eb00ea4afe9c587b90632588f0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732123529645 2024-11-20T17:25:33,134 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 823991bfd679429d99d4ddedf9ee33ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123529757 2024-11-20T17:25:33,134 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 998f57c729f846db826424f5b440be63, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732123531930 2024-11-20T17:25:33,136 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/B of 9889f1e232eeda308e27b8e2d47795bd into 7fb0cdd817cc468c8cd595e21aa63bef(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:33,136 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:33,136 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/B, priority=13, startTime=1732123533102; duration=0sec 2024-11-20T17:25:33,137 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:33,137 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:B 2024-11-20T17:25:33,142 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#C#compaction#221 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:33,143 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/257d2a8166fe40598cb6a76c9911c119 is 50, key is test_row_0/C:col10/1732123531930/Put/seqid=0 2024-11-20T17:25:33,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742090_1266 (size=12493) 2024-11-20T17:25:33,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:33,361 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:25:33,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:33,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:33,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:33,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:33,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:33,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:33,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/77c775a35323417fbf0f8484bbda755d 
is 50, key is test_row_0/A:col10/1732123533360/Put/seqid=0 2024-11-20T17:25:33,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742091_1267 (size=12151) 2024-11-20T17:25:33,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:33,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123593411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:33,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:33,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123593412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:33,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:33,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123593514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:33,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:33,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123593515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:33,554 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/257d2a8166fe40598cb6a76c9911c119 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/257d2a8166fe40598cb6a76c9911c119 2024-11-20T17:25:33,559 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/C of 9889f1e232eeda308e27b8e2d47795bd into 257d2a8166fe40598cb6a76c9911c119(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:33,559 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:33,559 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/C, priority=13, startTime=1732123533102; duration=0sec 2024-11-20T17:25:33,559 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:33,559 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:C 2024-11-20T17:25:33,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T17:25:33,587 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-20T17:25:33,589 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:25:33,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-20T17:25:33,590 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:25:33,591 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:25:33,591 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:25:33,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T17:25:33,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T17:25:33,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:33,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123593716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:33,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:33,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123593719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:33,742 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:33,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T17:25:33,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:33,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:33,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:33,743 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:33,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:33,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:33,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/77c775a35323417fbf0f8484bbda755d 2024-11-20T17:25:33,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/34fec16b53e3424ebe42b450926f7970 is 50, key is test_row_0/B:col10/1732123533360/Put/seqid=0 2024-11-20T17:25:33,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742092_1268 (size=12151) 2024-11-20T17:25:33,895 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:33,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T17:25:33,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:33,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:33,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T17:25:33,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:33,896 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:33,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:33,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:34,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:34,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123594019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:34,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:34,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123594021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:34,048 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:34,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T17:25:34,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:34,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:34,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:34,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:34,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:34,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:34,186 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/34fec16b53e3424ebe42b450926f7970 2024-11-20T17:25:34,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/114a4cc8fbb947dda2ec886ae874de6f is 50, key is test_row_0/C:col10/1732123533360/Put/seqid=0 2024-11-20T17:25:34,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T17:25:34,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742093_1269 (size=12151) 2024-11-20T17:25:34,201 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:34,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T17:25:34,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:34,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:34,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:34,202 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:34,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:34,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:34,354 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:34,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T17:25:34,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:34,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:34,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:34,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:34,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:34,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:34,507 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:34,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T17:25:34,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:34,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:34,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:34,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:34,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:34,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:34,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:34,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123594522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:34,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:34,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123594524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:34,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/114a4cc8fbb947dda2ec886ae874de6f 2024-11-20T17:25:34,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/77c775a35323417fbf0f8484bbda755d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/77c775a35323417fbf0f8484bbda755d 2024-11-20T17:25:34,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/77c775a35323417fbf0f8484bbda755d, entries=150, sequenceid=175, filesize=11.9 K 2024-11-20T17:25:34,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/34fec16b53e3424ebe42b450926f7970 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/34fec16b53e3424ebe42b450926f7970 2024-11-20T17:25:34,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/34fec16b53e3424ebe42b450926f7970, entries=150, sequenceid=175, filesize=11.9 K 2024-11-20T17:25:34,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/114a4cc8fbb947dda2ec886ae874de6f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/114a4cc8fbb947dda2ec886ae874de6f 2024-11-20T17:25:34,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/114a4cc8fbb947dda2ec886ae874de6f, entries=150, sequenceid=175, filesize=11.9 K 2024-11-20T17:25:34,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 9889f1e232eeda308e27b8e2d47795bd in 1270ms, sequenceid=175, compaction requested=false 2024-11-20T17:25:34,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:34,660 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:34,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T17:25:34,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:34,661 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:25:34,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:34,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:34,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:34,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:34,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:34,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:34,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/17cd00c6dadd48b1931f761c9810ce9b is 50, key is test_row_0/A:col10/1732123533403/Put/seqid=0 2024-11-20T17:25:34,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742094_1270 (size=12151) 2024-11-20T17:25:34,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=71 2024-11-20T17:25:35,071 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/17cd00c6dadd48b1931f761c9810ce9b 2024-11-20T17:25:35,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/2a0f8efc4b834f59bc442648037667d7 is 50, key is test_row_0/B:col10/1732123533403/Put/seqid=0 2024-11-20T17:25:35,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742095_1271 (size=12151) 2024-11-20T17:25:35,083 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/2a0f8efc4b834f59bc442648037667d7 2024-11-20T17:25:35,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/0b5323fbc13748de9f4b60cfe59d1e44 is 50, key is test_row_0/C:col10/1732123533403/Put/seqid=0 2024-11-20T17:25:35,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742096_1272 (size=12151) 2024-11-20T17:25:35,106 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/0b5323fbc13748de9f4b60cfe59d1e44 2024-11-20T17:25:35,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/17cd00c6dadd48b1931f761c9810ce9b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/17cd00c6dadd48b1931f761c9810ce9b 2024-11-20T17:25:35,114 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/17cd00c6dadd48b1931f761c9810ce9b, entries=150, sequenceid=200, filesize=11.9 K 2024-11-20T17:25:35,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/2a0f8efc4b834f59bc442648037667d7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/2a0f8efc4b834f59bc442648037667d7 2024-11-20T17:25:35,119 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/2a0f8efc4b834f59bc442648037667d7, entries=150, sequenceid=200, filesize=11.9 K 2024-11-20T17:25:35,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/0b5323fbc13748de9f4b60cfe59d1e44 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/0b5323fbc13748de9f4b60cfe59d1e44 2024-11-20T17:25:35,124 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/0b5323fbc13748de9f4b60cfe59d1e44, entries=150, sequenceid=200, filesize=11.9 K 2024-11-20T17:25:35,125 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 9889f1e232eeda308e27b8e2d47795bd in 463ms, sequenceid=200, compaction requested=true 2024-11-20T17:25:35,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:35,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:35,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-20T17:25:35,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-20T17:25:35,127 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-20T17:25:35,128 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5350 sec 2024-11-20T17:25:35,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.5390 sec 2024-11-20T17:25:35,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:35,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:25:35,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:35,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:35,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:35,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:35,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:35,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:35,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/2d555a824bbf42bd82d9cf7721ae00ad is 50, key is test_row_0/A:col10/1732123535540/Put/seqid=0 2024-11-20T17:25:35,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742097_1273 (size=14541) 2024-11-20T17:25:35,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:35,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123595595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:35,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:35,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123595598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:35,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T17:25:35,699 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-20T17:25:35,700 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:25:35,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:35,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123595699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:35,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:35,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-20T17:25:35,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123595701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:35,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:25:35,702 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:25:35,703 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:25:35,703 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:25:35,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:25:35,854 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:35,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T17:25:35,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 
{event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:35,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:35,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:35,855 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:35,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:35,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:35,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:35,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123595902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:35,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:35,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123595902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:35,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/2d555a824bbf42bd82d9cf7721ae00ad 2024-11-20T17:25:35,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/3aa270a121554fc8b85b953be55e7c81 is 50, key is test_row_0/B:col10/1732123535540/Put/seqid=0 2024-11-20T17:25:35,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742098_1274 (size=12151) 2024-11-20T17:25:36,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:25:36,007 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:36,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T17:25:36,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:36,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:36,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:36,007 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,159 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:36,160 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T17:25:36,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:36,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:36,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:36,160 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:36,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123596205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:36,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:36,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123596206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:36,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:25:36,312 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:36,313 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T17:25:36,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:36,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:36,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:36,313 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:36,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,367 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/3aa270a121554fc8b85b953be55e7c81 2024-11-20T17:25:36,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/632204cabe6142a1b6952f839260ec1d is 50, key is test_row_0/C:col10/1732123535540/Put/seqid=0 2024-11-20T17:25:36,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742099_1275 (size=12151) 2024-11-20T17:25:36,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:36,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123596435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:36,438 DEBUG [Thread-1088 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., hostname=d514dc944523,44015,1732123455293, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:36,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:36,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123596436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:36,439 DEBUG [Thread-1082 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., hostname=d514dc944523,44015,1732123455293, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:36,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:36,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123596447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:36,451 DEBUG [Thread-1080 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., hostname=d514dc944523,44015,1732123455293, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:36,465 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:36,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T17:25:36,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:36,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:36,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:36,466 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,618 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:36,619 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T17:25:36,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:36,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:36,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:36,619 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:36,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:36,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123596710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:36,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:36,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123596711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:36,771 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:36,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T17:25:36,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:36,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:36,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:36,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:36,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:36,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:36,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/632204cabe6142a1b6952f839260ec1d 2024-11-20T17:25:36,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/2d555a824bbf42bd82d9cf7721ae00ad as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/2d555a824bbf42bd82d9cf7721ae00ad 2024-11-20T17:25:36,791 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/2d555a824bbf42bd82d9cf7721ae00ad, entries=200, sequenceid=211, filesize=14.2 K 2024-11-20T17:25:36,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/3aa270a121554fc8b85b953be55e7c81 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/3aa270a121554fc8b85b953be55e7c81 2024-11-20T17:25:36,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/3aa270a121554fc8b85b953be55e7c81, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T17:25:36,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/632204cabe6142a1b6952f839260ec1d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/632204cabe6142a1b6952f839260ec1d 2024-11-20T17:25:36,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/632204cabe6142a1b6952f839260ec1d, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T17:25:36,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9889f1e232eeda308e27b8e2d47795bd in 1262ms, sequenceid=211, compaction requested=true 2024-11-20T17:25:36,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:36,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:25:36,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:36,803 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:25:36,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:25:36,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:36,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:25:36,803 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:25:36,803 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:36,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:25:36,805 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:25:36,805 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/B is initiating minor compaction (all files) 2024-11-20T17:25:36,805 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/B in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:36,805 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/7fb0cdd817cc468c8cd595e21aa63bef, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/34fec16b53e3424ebe42b450926f7970, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/2a0f8efc4b834f59bc442648037667d7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/3aa270a121554fc8b85b953be55e7c81] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=47.8 K 2024-11-20T17:25:36,805 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51336 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:25:36,806 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/A is initiating minor compaction (all files) 2024-11-20T17:25:36,806 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/A in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:36,806 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/af7a5a6d387e43fb989968e90cd6288e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/77c775a35323417fbf0f8484bbda755d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/17cd00c6dadd48b1931f761c9810ce9b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/2d555a824bbf42bd82d9cf7721ae00ad] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=50.1 K 2024-11-20T17:25:36,806 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fb0cdd817cc468c8cd595e21aa63bef, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732123531930 2024-11-20T17:25:36,806 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 34fec16b53e3424ebe42b450926f7970, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732123533358 2024-11-20T17:25:36,807 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting af7a5a6d387e43fb989968e90cd6288e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, 
earliestPutTs=1732123531930 2024-11-20T17:25:36,807 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a0f8efc4b834f59bc442648037667d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732123533403 2024-11-20T17:25:36,808 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 3aa270a121554fc8b85b953be55e7c81, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732123535535 2024-11-20T17:25:36,808 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77c775a35323417fbf0f8484bbda755d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732123533358 2024-11-20T17:25:36,808 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17cd00c6dadd48b1931f761c9810ce9b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732123533403 2024-11-20T17:25:36,808 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d555a824bbf42bd82d9cf7721ae00ad, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732123535535 2024-11-20T17:25:36,819 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#B#compaction#231 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:36,819 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#A#compaction#232 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:36,820 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/8b04ec2ba4be4fb2a21baaaf3ce58a90 is 50, key is test_row_0/B:col10/1732123535540/Put/seqid=0 2024-11-20T17:25:36,820 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/412d3d6ebc794c2ab736911dbd185ae8 is 50, key is test_row_0/A:col10/1732123535540/Put/seqid=0 2024-11-20T17:25:36,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742101_1277 (size=12629) 2024-11-20T17:25:36,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742100_1276 (size=12629) 2024-11-20T17:25:36,923 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:36,923 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T17:25:36,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:36,924 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:25:36,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:36,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:36,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:36,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:36,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:36,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:36,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/7d94b86164c5441d9970af6b14dc3983 is 50, key is test_row_0/A:col10/1732123535594/Put/seqid=0 2024-11-20T17:25:36,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742102_1278 (size=12151) 2024-11-20T17:25:37,232 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/412d3d6ebc794c2ab736911dbd185ae8 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/412d3d6ebc794c2ab736911dbd185ae8 2024-11-20T17:25:37,236 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/8b04ec2ba4be4fb2a21baaaf3ce58a90 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/8b04ec2ba4be4fb2a21baaaf3ce58a90 2024-11-20T17:25:37,238 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/A of 9889f1e232eeda308e27b8e2d47795bd into 412d3d6ebc794c2ab736911dbd185ae8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:37,238 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:37,238 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/A, priority=12, startTime=1732123536803; duration=0sec 2024-11-20T17:25:37,238 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:37,238 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:A 2024-11-20T17:25:37,238 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:25:37,240 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:25:37,240 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/C is initiating minor compaction (all files) 2024-11-20T17:25:37,240 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/C in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:37,240 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/257d2a8166fe40598cb6a76c9911c119, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/114a4cc8fbb947dda2ec886ae874de6f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/0b5323fbc13748de9f4b60cfe59d1e44, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/632204cabe6142a1b6952f839260ec1d] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=47.8 K 2024-11-20T17:25:37,240 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 257d2a8166fe40598cb6a76c9911c119, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732123531930 2024-11-20T17:25:37,241 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 114a4cc8fbb947dda2ec886ae874de6f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1732123533358 2024-11-20T17:25:37,242 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/B of 
9889f1e232eeda308e27b8e2d47795bd into 8b04ec2ba4be4fb2a21baaaf3ce58a90(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:37,242 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:37,242 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/B, priority=12, startTime=1732123536803; duration=0sec 2024-11-20T17:25:37,242 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:37,242 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:B 2024-11-20T17:25:37,242 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b5323fbc13748de9f4b60cfe59d1e44, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732123533403 2024-11-20T17:25:37,243 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 632204cabe6142a1b6952f839260ec1d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732123535535 2024-11-20T17:25:37,252 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#C#compaction#234 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:37,253 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/707273678ed14e7b9e0955902d2cb6b9 is 50, key is test_row_0/C:col10/1732123535540/Put/seqid=0 2024-11-20T17:25:37,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742103_1279 (size=12629) 2024-11-20T17:25:37,336 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/7d94b86164c5441d9970af6b14dc3983 2024-11-20T17:25:37,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/96103357b86643eb9a3e969b12843476 is 50, key is test_row_0/B:col10/1732123535594/Put/seqid=0 2024-11-20T17:25:37,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742104_1280 (size=12151) 2024-11-20T17:25:37,667 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/707273678ed14e7b9e0955902d2cb6b9 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/707273678ed14e7b9e0955902d2cb6b9 2024-11-20T17:25:37,672 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/C of 9889f1e232eeda308e27b8e2d47795bd into 707273678ed14e7b9e0955902d2cb6b9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:37,672 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:37,672 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/C, priority=12, startTime=1732123536803; duration=0sec 2024-11-20T17:25:37,672 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:37,672 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:C 2024-11-20T17:25:37,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:37,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:37,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:37,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123597724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:37,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:37,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123597725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:37,748 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/96103357b86643eb9a3e969b12843476 2024-11-20T17:25:37,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/c6f492708edd4a478c34336e348638d6 is 50, key is test_row_0/C:col10/1732123535594/Put/seqid=0 2024-11-20T17:25:37,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742105_1281 (size=12151) 2024-11-20T17:25:37,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:25:37,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123597827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:37,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123597827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:38,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:38,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123598029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:38,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:38,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123598030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:38,161 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/c6f492708edd4a478c34336e348638d6 2024-11-20T17:25:38,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/7d94b86164c5441d9970af6b14dc3983 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/7d94b86164c5441d9970af6b14dc3983 2024-11-20T17:25:38,171 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/7d94b86164c5441d9970af6b14dc3983, entries=150, sequenceid=236, filesize=11.9 K 2024-11-20T17:25:38,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/96103357b86643eb9a3e969b12843476 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/96103357b86643eb9a3e969b12843476 2024-11-20T17:25:38,176 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/96103357b86643eb9a3e969b12843476, entries=150, sequenceid=236, filesize=11.9 K 2024-11-20T17:25:38,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/c6f492708edd4a478c34336e348638d6 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/c6f492708edd4a478c34336e348638d6 2024-11-20T17:25:38,181 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/c6f492708edd4a478c34336e348638d6, entries=150, sequenceid=236, filesize=11.9 K 2024-11-20T17:25:38,182 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 9889f1e232eeda308e27b8e2d47795bd in 1258ms, sequenceid=236, compaction requested=false 2024-11-20T17:25:38,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:38,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:38,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-20T17:25:38,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-20T17:25:38,185 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-20T17:25:38,185 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4800 sec 2024-11-20T17:25:38,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 2.4860 sec 2024-11-20T17:25:38,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:38,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:25:38,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:38,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:38,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:38,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:38,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:38,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:38,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/69f0169e44634e83897f485373507b54 is 50, key is test_row_0/A:col10/1732123538333/Put/seqid=0 2024-11-20T17:25:38,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742106_1282 (size=14541) 2024-11-20T17:25:38,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/69f0169e44634e83897f485373507b54 2024-11-20T17:25:38,355 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/133dfba96fdd479d91afab5bd0160aaf is 50, key is test_row_0/B:col10/1732123538333/Put/seqid=0 2024-11-20T17:25:38,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:38,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123598362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:38,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742107_1283 (size=12151) 2024-11-20T17:25:38,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:38,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123598364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:38,366 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/133dfba96fdd479d91afab5bd0160aaf 2024-11-20T17:25:38,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/bdd69299ac4d4dad93cfb65fc85e210d is 50, key is test_row_0/C:col10/1732123538333/Put/seqid=0 2024-11-20T17:25:38,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742108_1284 (size=12151) 2024-11-20T17:25:38,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/bdd69299ac4d4dad93cfb65fc85e210d 2024-11-20T17:25:38,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/69f0169e44634e83897f485373507b54 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/69f0169e44634e83897f485373507b54 2024-11-20T17:25:38,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/69f0169e44634e83897f485373507b54, entries=200, sequenceid=252, filesize=14.2 K 2024-11-20T17:25:38,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/133dfba96fdd479d91afab5bd0160aaf as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/133dfba96fdd479d91afab5bd0160aaf 2024-11-20T17:25:38,405 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/133dfba96fdd479d91afab5bd0160aaf, entries=150, sequenceid=252, filesize=11.9 K 2024-11-20T17:25:38,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/bdd69299ac4d4dad93cfb65fc85e210d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/bdd69299ac4d4dad93cfb65fc85e210d 2024-11-20T17:25:38,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/bdd69299ac4d4dad93cfb65fc85e210d, entries=150, sequenceid=252, filesize=11.9 K 2024-11-20T17:25:38,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 9889f1e232eeda308e27b8e2d47795bd in 79ms, sequenceid=252, compaction requested=true 2024-11-20T17:25:38,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:38,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:25:38,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:38,412 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:38,412 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:38,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:25:38,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:38,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:25:38,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:38,414 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:38,414 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 
9889f1e232eeda308e27b8e2d47795bd/B is initiating minor compaction (all files) 2024-11-20T17:25:38,414 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/B in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:38,414 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/8b04ec2ba4be4fb2a21baaaf3ce58a90, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/96103357b86643eb9a3e969b12843476, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/133dfba96fdd479d91afab5bd0160aaf] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=36.1 K 2024-11-20T17:25:38,414 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39321 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:38,414 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/A is initiating minor compaction (all files) 2024-11-20T17:25:38,414 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/A in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:38,414 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/412d3d6ebc794c2ab736911dbd185ae8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/7d94b86164c5441d9970af6b14dc3983, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/69f0169e44634e83897f485373507b54] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=38.4 K 2024-11-20T17:25:38,415 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b04ec2ba4be4fb2a21baaaf3ce58a90, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732123535535 2024-11-20T17:25:38,415 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 412d3d6ebc794c2ab736911dbd185ae8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732123535535 2024-11-20T17:25:38,415 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 96103357b86643eb9a3e969b12843476, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732123535593 2024-11-20T17:25:38,416 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d94b86164c5441d9970af6b14dc3983, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732123535593 2024-11-20T17:25:38,416 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 133dfba96fdd479d91afab5bd0160aaf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732123537720 2024-11-20T17:25:38,416 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69f0169e44634e83897f485373507b54, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732123537720 2024-11-20T17:25:38,427 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#A#compaction#240 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:38,428 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/478a8ddc1ac94cd8bf6d3cabfa64d4ed is 50, key is test_row_0/A:col10/1732123538333/Put/seqid=0 2024-11-20T17:25:38,430 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#B#compaction#241 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:38,431 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/b7dfc262bdc6456ab9c387ebdd53954e is 50, key is test_row_0/B:col10/1732123538333/Put/seqid=0 2024-11-20T17:25:38,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742110_1286 (size=12731) 2024-11-20T17:25:38,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742109_1285 (size=12731) 2024-11-20T17:25:38,458 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/b7dfc262bdc6456ab9c387ebdd53954e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/b7dfc262bdc6456ab9c387ebdd53954e 2024-11-20T17:25:38,465 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/B of 9889f1e232eeda308e27b8e2d47795bd into b7dfc262bdc6456ab9c387ebdd53954e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:38,465 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:38,465 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/B, priority=13, startTime=1732123538412; duration=0sec 2024-11-20T17:25:38,465 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:38,465 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:B 2024-11-20T17:25:38,465 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:38,466 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:38,466 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/C is initiating minor compaction (all files) 2024-11-20T17:25:38,466 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/C in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:38,467 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/707273678ed14e7b9e0955902d2cb6b9, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/c6f492708edd4a478c34336e348638d6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/bdd69299ac4d4dad93cfb65fc85e210d] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=36.1 K 2024-11-20T17:25:38,467 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 707273678ed14e7b9e0955902d2cb6b9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732123535535 2024-11-20T17:25:38,467 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting c6f492708edd4a478c34336e348638d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732123535593 2024-11-20T17:25:38,468 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting bdd69299ac4d4dad93cfb65fc85e210d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732123537720 2024-11-20T17:25:38,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:38,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:25:38,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:38,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:38,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:38,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:38,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:38,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:38,476 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/9e557d9d06794dd5b238d6836cabefcc is 50, key is test_row_0/A:col10/1732123538358/Put/seqid=0 2024-11-20T17:25:38,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:38,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123598480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:38,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:38,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123598481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:38,490 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#C#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:38,491 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/66de1dd7041148208abee5b0347af120 is 50, key is test_row_0/C:col10/1732123538333/Put/seqid=0 2024-11-20T17:25:38,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742111_1287 (size=12301) 2024-11-20T17:25:38,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742112_1288 (size=12731) 2024-11-20T17:25:38,507 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/66de1dd7041148208abee5b0347af120 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/66de1dd7041148208abee5b0347af120 2024-11-20T17:25:38,512 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/C of 9889f1e232eeda308e27b8e2d47795bd into 66de1dd7041148208abee5b0347af120(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:38,512 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:38,512 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/C, priority=13, startTime=1732123538412; duration=0sec 2024-11-20T17:25:38,512 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:38,512 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:C 2024-11-20T17:25:38,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:38,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123598585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:38,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:38,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123598585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:38,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:38,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123598788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:38,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:38,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123598788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:38,856 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/478a8ddc1ac94cd8bf6d3cabfa64d4ed as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/478a8ddc1ac94cd8bf6d3cabfa64d4ed 2024-11-20T17:25:38,861 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/A of 9889f1e232eeda308e27b8e2d47795bd into 478a8ddc1ac94cd8bf6d3cabfa64d4ed(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:38,861 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:38,861 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/A, priority=13, startTime=1732123538412; duration=0sec 2024-11-20T17:25:38,861 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:38,861 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:A 2024-11-20T17:25:38,896 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/9e557d9d06794dd5b238d6836cabefcc 2024-11-20T17:25:38,903 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/6173c54dae1f49d2a16cd51ab361870f is 50, key is test_row_0/B:col10/1732123538358/Put/seqid=0 2024-11-20T17:25:38,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742113_1289 (size=12301) 2024-11-20T17:25:39,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123599091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:39,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123599092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:39,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/6173c54dae1f49d2a16cd51ab361870f 2024-11-20T17:25:39,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/2ffd39fd83ee4b2aa378f0afcc68d180 is 50, key is test_row_0/C:col10/1732123538358/Put/seqid=0 2024-11-20T17:25:39,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742114_1290 (size=12301) 2024-11-20T17:25:39,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:39,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123599596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:39,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:39,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123599597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:39,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/2ffd39fd83ee4b2aa378f0afcc68d180 2024-11-20T17:25:39,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/9e557d9d06794dd5b238d6836cabefcc as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/9e557d9d06794dd5b238d6836cabefcc 2024-11-20T17:25:39,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/9e557d9d06794dd5b238d6836cabefcc, entries=150, sequenceid=278, filesize=12.0 K 2024-11-20T17:25:39,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/6173c54dae1f49d2a16cd51ab361870f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/6173c54dae1f49d2a16cd51ab361870f 2024-11-20T17:25:39,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/6173c54dae1f49d2a16cd51ab361870f, entries=150, sequenceid=278, filesize=12.0 K 2024-11-20T17:25:39,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/2ffd39fd83ee4b2aa378f0afcc68d180 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/2ffd39fd83ee4b2aa378f0afcc68d180 2024-11-20T17:25:39,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/2ffd39fd83ee4b2aa378f0afcc68d180, entries=150, sequenceid=278, filesize=12.0 K 2024-11-20T17:25:39,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 9889f1e232eeda308e27b8e2d47795bd in 1270ms, sequenceid=278, compaction requested=false 2024-11-20T17:25:39,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:39,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T17:25:39,807 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-20T17:25:39,808 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:25:39,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-20T17:25:39,810 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:25:39,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T17:25:39,810 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:25:39,811 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:25:39,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T17:25:39,962 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:39,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T17:25:39,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:39,963 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:25:39,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:39,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:39,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:39,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:39,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:39,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:39,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/44dec480e68c4300a0bdb205bafa2b92 is 50, key is test_row_0/A:col10/1732123538477/Put/seqid=0 2024-11-20T17:25:39,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742115_1291 (size=12301) 2024-11-20T17:25:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T17:25:40,387 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/44dec480e68c4300a0bdb205bafa2b92 2024-11-20T17:25:40,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/b031ec5b6633443397c507f2342883e1 is 50, key is test_row_0/B:col10/1732123538477/Put/seqid=0 2024-11-20T17:25:40,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T17:25:40,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742116_1292 (size=12301) 2024-11-20T17:25:40,413 INFO 
[RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/b031ec5b6633443397c507f2342883e1 2024-11-20T17:25:40,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/16caead2a0174092bf3ad00696cbc932 is 50, key is test_row_0/C:col10/1732123538477/Put/seqid=0 2024-11-20T17:25:40,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742117_1293 (size=12301) 2024-11-20T17:25:40,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:40,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:40,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:40,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123600633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:40,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:40,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123600633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:40,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:40,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123600736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:40,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:40,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123600736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:40,827 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/16caead2a0174092bf3ad00696cbc932 2024-11-20T17:25:40,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/44dec480e68c4300a0bdb205bafa2b92 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/44dec480e68c4300a0bdb205bafa2b92 2024-11-20T17:25:40,837 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/44dec480e68c4300a0bdb205bafa2b92, entries=150, sequenceid=291, filesize=12.0 K 2024-11-20T17:25:40,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/b031ec5b6633443397c507f2342883e1 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/b031ec5b6633443397c507f2342883e1 2024-11-20T17:25:40,842 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/b031ec5b6633443397c507f2342883e1, entries=150, sequenceid=291, filesize=12.0 K 2024-11-20T17:25:40,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/16caead2a0174092bf3ad00696cbc932 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/16caead2a0174092bf3ad00696cbc932 2024-11-20T17:25:40,847 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/16caead2a0174092bf3ad00696cbc932, entries=150, sequenceid=291, filesize=12.0 K 2024-11-20T17:25:40,847 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9889f1e232eeda308e27b8e2d47795bd in 884ms, sequenceid=291, compaction requested=true 2024-11-20T17:25:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-20T17:25:40,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-20T17:25:40,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-20T17:25:40,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0380 sec 2024-11-20T17:25:40,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.0420 sec 2024-11-20T17:25:40,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T17:25:40,914 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-20T17:25:40,915 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:25:40,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-20T17:25:40,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:25:40,916 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:25:40,922 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:25:40,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:25:40,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:40,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:25:40,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:40,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:40,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:40,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:40,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:40,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:40,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/da25b58d4bb0400aaece156460cb9d6e is 50, key is test_row_0/A:col10/1732123540632/Put/seqid=0 2024-11-20T17:25:40,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:40,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123600948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:40,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742118_1294 (size=12301) 2024-11-20T17:25:40,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:40,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123600950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:41,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:25:41,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:41,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123601052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:41,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:41,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123601053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:41,073 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:41,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:25:41,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:41,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:41,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:25:41,226 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:41,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:25:41,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:41,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:41,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123601255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:41,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:41,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123601256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:41,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/da25b58d4bb0400aaece156460cb9d6e 2024-11-20T17:25:41,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/d513ad0b92ce421dae270afcefd8db78 is 50, key is test_row_0/B:col10/1732123540632/Put/seqid=0 2024-11-20T17:25:41,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742119_1295 (size=12301) 2024-11-20T17:25:41,379 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:41,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:25:41,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:41,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,380 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:25:41,532 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:41,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:25:41,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:41,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,533 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:41,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123601558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:41,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:41,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123601559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:41,685 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:41,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:25:41,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:41,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:41,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,686 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:41,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:41,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/d513ad0b92ce421dae270afcefd8db78 2024-11-20T17:25:41,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/9959ae0bcfc941a3826e65066eb7496c is 50, key is test_row_0/C:col10/1732123540632/Put/seqid=0 2024-11-20T17:25:41,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742120_1296 (size=12301) 2024-11-20T17:25:41,838 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:41,839 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:25:41,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:41,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,839 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:41,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,991 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:41,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:25:41,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:41,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:41,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:41,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:25:42,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:42,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123602063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:42,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:42,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123602065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:42,144 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:42,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:25:42,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:42,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:42,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:42,145 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:42,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:42,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:42,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/9959ae0bcfc941a3826e65066eb7496c 2024-11-20T17:25:42,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/da25b58d4bb0400aaece156460cb9d6e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/da25b58d4bb0400aaece156460cb9d6e 2024-11-20T17:25:42,184 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/da25b58d4bb0400aaece156460cb9d6e, entries=150, sequenceid=318, filesize=12.0 K 2024-11-20T17:25:42,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/d513ad0b92ce421dae270afcefd8db78 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/d513ad0b92ce421dae270afcefd8db78 2024-11-20T17:25:42,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/d513ad0b92ce421dae270afcefd8db78, entries=150, sequenceid=318, filesize=12.0 K 2024-11-20T17:25:42,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/9959ae0bcfc941a3826e65066eb7496c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/9959ae0bcfc941a3826e65066eb7496c 2024-11-20T17:25:42,193 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/9959ae0bcfc941a3826e65066eb7496c, entries=150, sequenceid=318, filesize=12.0 K 2024-11-20T17:25:42,194 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 9889f1e232eeda308e27b8e2d47795bd in 1254ms, sequenceid=318, compaction requested=true 2024-11-20T17:25:42,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:42,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
9889f1e232eeda308e27b8e2d47795bd:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:25:42,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:42,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:25:42,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:42,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:25:42,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:42,194 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:25:42,194 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:25:42,196 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:25:42,196 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:25:42,196 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/B is initiating minor compaction (all files) 2024-11-20T17:25:42,196 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/A is initiating minor compaction (all files) 2024-11-20T17:25:42,196 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/A in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:42,196 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/B in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:42,196 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/478a8ddc1ac94cd8bf6d3cabfa64d4ed, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/9e557d9d06794dd5b238d6836cabefcc, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/44dec480e68c4300a0bdb205bafa2b92, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/da25b58d4bb0400aaece156460cb9d6e] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=48.5 K 2024-11-20T17:25:42,196 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/b7dfc262bdc6456ab9c387ebdd53954e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/6173c54dae1f49d2a16cd51ab361870f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/b031ec5b6633443397c507f2342883e1, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/d513ad0b92ce421dae270afcefd8db78] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=48.5 K 2024-11-20T17:25:42,196 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 478a8ddc1ac94cd8bf6d3cabfa64d4ed, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732123537720 2024-11-20T17:25:42,196 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting b7dfc262bdc6456ab9c387ebdd53954e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732123537720 2024-11-20T17:25:42,197 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e557d9d06794dd5b238d6836cabefcc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732123538358 2024-11-20T17:25:42,197 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 6173c54dae1f49d2a16cd51ab361870f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732123538358 2024-11-20T17:25:42,197 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44dec480e68c4300a0bdb205bafa2b92, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732123538477 2024-11-20T17:25:42,197 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 
b031ec5b6633443397c507f2342883e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732123538477 2024-11-20T17:25:42,198 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting da25b58d4bb0400aaece156460cb9d6e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732123540626 2024-11-20T17:25:42,198 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting d513ad0b92ce421dae270afcefd8db78, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732123540626 2024-11-20T17:25:42,207 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#A#compaction#252 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:42,207 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#B#compaction#253 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:42,207 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/157917d1d93149a683a763989e69d217 is 50, key is test_row_0/A:col10/1732123540632/Put/seqid=0 2024-11-20T17:25:42,208 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/f3297d025aa249df872b86b48fd11a1e is 50, key is test_row_0/B:col10/1732123540632/Put/seqid=0 2024-11-20T17:25:42,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742122_1298 (size=13017) 2024-11-20T17:25:42,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742121_1297 (size=13017) 2024-11-20T17:25:42,297 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:42,298 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T17:25:42,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:42,298 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-20T17:25:42,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:42,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:42,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:42,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:42,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:42,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:42,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/5d450bda837a4959897c801e418eb53f is 50, key is test_row_0/A:col10/1732123540947/Put/seqid=0 2024-11-20T17:25:42,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742123_1299 (size=12301) 2024-11-20T17:25:42,308 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/5d450bda837a4959897c801e418eb53f 2024-11-20T17:25:42,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/adecb9116cdc43f9a608a110a171b476 is 50, key is test_row_0/B:col10/1732123540947/Put/seqid=0 2024-11-20T17:25:42,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742124_1300 (size=12301) 2024-11-20T17:25:42,619 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/157917d1d93149a683a763989e69d217 as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/157917d1d93149a683a763989e69d217 2024-11-20T17:25:42,619 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/f3297d025aa249df872b86b48fd11a1e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/f3297d025aa249df872b86b48fd11a1e 2024-11-20T17:25:42,624 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/A of 9889f1e232eeda308e27b8e2d47795bd into 157917d1d93149a683a763989e69d217(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:42,624 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:42,624 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/A, priority=12, startTime=1732123542194; duration=0sec 2024-11-20T17:25:42,625 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:42,625 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:A 2024-11-20T17:25:42,625 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:25:42,625 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/B of 9889f1e232eeda308e27b8e2d47795bd into f3297d025aa249df872b86b48fd11a1e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:42,625 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:42,625 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/B, priority=12, startTime=1732123542194; duration=0sec 2024-11-20T17:25:42,625 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:42,625 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:B 2024-11-20T17:25:42,626 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:25:42,626 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/C is initiating minor compaction (all files) 2024-11-20T17:25:42,626 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/C in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:42,626 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/66de1dd7041148208abee5b0347af120, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/2ffd39fd83ee4b2aa378f0afcc68d180, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/16caead2a0174092bf3ad00696cbc932, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/9959ae0bcfc941a3826e65066eb7496c] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=48.5 K 2024-11-20T17:25:42,627 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66de1dd7041148208abee5b0347af120, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732123537720 2024-11-20T17:25:42,627 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ffd39fd83ee4b2aa378f0afcc68d180, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732123538358 2024-11-20T17:25:42,627 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16caead2a0174092bf3ad00696cbc932, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732123538477 2024-11-20T17:25:42,628 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
9959ae0bcfc941a3826e65066eb7496c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732123540626 2024-11-20T17:25:42,636 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#C#compaction#256 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:42,637 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/1989ab3253a64b678647472a7960a065 is 50, key is test_row_0/C:col10/1732123540632/Put/seqid=0 2024-11-20T17:25:42,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742125_1301 (size=13017) 2024-11-20T17:25:42,722 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/adecb9116cdc43f9a608a110a171b476 2024-11-20T17:25:42,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/cf84b542c97c47bb958a6e3ed014804d is 50, key is test_row_0/C:col10/1732123540947/Put/seqid=0 2024-11-20T17:25:42,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742126_1302 (size=12301) 2024-11-20T17:25:43,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:25:43,058 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/1989ab3253a64b678647472a7960a065 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/1989ab3253a64b678647472a7960a065 2024-11-20T17:25:43,063 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/C of 9889f1e232eeda308e27b8e2d47795bd into 1989ab3253a64b678647472a7960a065(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:43,063 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:43,063 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/C, priority=12, startTime=1732123542194; duration=0sec 2024-11-20T17:25:43,063 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:43,063 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:C 2024-11-20T17:25:43,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:43,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:43,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:43,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:43,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123603095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:43,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123603095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:43,146 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/cf84b542c97c47bb958a6e3ed014804d 2024-11-20T17:25:43,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/5d450bda837a4959897c801e418eb53f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/5d450bda837a4959897c801e418eb53f 2024-11-20T17:25:43,156 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/5d450bda837a4959897c801e418eb53f, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T17:25:43,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/adecb9116cdc43f9a608a110a171b476 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/adecb9116cdc43f9a608a110a171b476 2024-11-20T17:25:43,161 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 
{event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/adecb9116cdc43f9a608a110a171b476, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T17:25:43,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/cf84b542c97c47bb958a6e3ed014804d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/cf84b542c97c47bb958a6e3ed014804d 2024-11-20T17:25:43,166 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/cf84b542c97c47bb958a6e3ed014804d, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T17:25:43,167 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 9889f1e232eeda308e27b8e2d47795bd in 869ms, sequenceid=327, compaction requested=false 2024-11-20T17:25:43,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:43,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:43,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-20T17:25:43,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-20T17:25:43,170 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-20T17:25:43,170 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2470 sec 2024-11-20T17:25:43,171 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 2.2550 sec 2024-11-20T17:25:43,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:43,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T17:25:43,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:43,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:43,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:43,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:43,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:43,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:43,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/8ffec4f0aff24712b161c54c466add12 is 50, key is test_row_0/A:col10/1732123543199/Put/seqid=0 2024-11-20T17:25:43,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:43,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123603205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:43,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:43,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123603206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:43,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742127_1303 (size=12301) 2024-11-20T17:25:43,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:43,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123603307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:43,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123603309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:43,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:43,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123603509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:43,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:43,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123603512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:43,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/8ffec4f0aff24712b161c54c466add12 2024-11-20T17:25:43,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/efddeee938ea40b081fe3366bca53d3a is 50, key is test_row_0/B:col10/1732123543199/Put/seqid=0 2024-11-20T17:25:43,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742128_1304 (size=12301) 2024-11-20T17:25:43,698 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T17:25:43,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:43,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123603813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:43,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:43,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123603817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:44,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/efddeee938ea40b081fe3366bca53d3a 2024-11-20T17:25:44,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/4719cc4d94844f2d8d2e183c153d0e83 is 50, key is test_row_0/C:col10/1732123543199/Put/seqid=0 2024-11-20T17:25:44,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742129_1305 (size=12301) 2024-11-20T17:25:44,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:44,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123604317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:44,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:44,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123604323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:44,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/4719cc4d94844f2d8d2e183c153d0e83 2024-11-20T17:25:44,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/8ffec4f0aff24712b161c54c466add12 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8ffec4f0aff24712b161c54c466add12 2024-11-20T17:25:44,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8ffec4f0aff24712b161c54c466add12, entries=150, sequenceid=360, filesize=12.0 K 2024-11-20T17:25:44,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/efddeee938ea40b081fe3366bca53d3a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/efddeee938ea40b081fe3366bca53d3a 2024-11-20T17:25:44,459 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/efddeee938ea40b081fe3366bca53d3a, entries=150, sequenceid=360, filesize=12.0 K 2024-11-20T17:25:44,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/4719cc4d94844f2d8d2e183c153d0e83 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/4719cc4d94844f2d8d2e183c153d0e83 2024-11-20T17:25:44,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/4719cc4d94844f2d8d2e183c153d0e83, entries=150, sequenceid=360, filesize=12.0 K 2024-11-20T17:25:44,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 9889f1e232eeda308e27b8e2d47795bd in 1264ms, sequenceid=360, compaction requested=true 2024-11-20T17:25:44,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:44,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:25:44,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:44,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:25:44,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:44,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:25:44,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:44,465 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:44,465 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:44,466 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:44,466 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:44,466 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/B is initiating minor compaction (all files) 2024-11-20T17:25:44,466 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/A is initiating minor compaction (all files) 2024-11-20T17:25:44,466 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/B in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
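The RegionTooBusyException entries above are the region refusing writes while its memstore sits over the 512.0 K blocking limit; the flush that finishes at 17:25:44,465 (~181 KB written across stores A/B/C) is what clears it. That blocking limit is the per-region memstore flush size multiplied by a blocking multiplier. The sketch below shows one way a test configuration could arrive at a 512 K limit -- the property names are the standard HBase keys, but the specific values are assumptions, not read from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (assumed; the default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writes are rejected with RegionTooBusyException once the memstore exceeds
    // flush.size * multiplier, i.e. 128 KB * 4 = 512 KB ("Over memstore limit=512.0 K").
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
```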
2024-11-20T17:25:44,466 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/A in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:44,466 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/f3297d025aa249df872b86b48fd11a1e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/adecb9116cdc43f9a608a110a171b476, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/efddeee938ea40b081fe3366bca53d3a] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=36.7 K 2024-11-20T17:25:44,466 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/157917d1d93149a683a763989e69d217, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/5d450bda837a4959897c801e418eb53f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8ffec4f0aff24712b161c54c466add12] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=36.7 K 2024-11-20T17:25:44,467 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting f3297d025aa249df872b86b48fd11a1e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732123540626 2024-11-20T17:25:44,467 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 157917d1d93149a683a763989e69d217, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732123540626 2024-11-20T17:25:44,467 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d450bda837a4959897c801e418eb53f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732123540946 2024-11-20T17:25:44,467 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting adecb9116cdc43f9a608a110a171b476, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732123540946 2024-11-20T17:25:44,467 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ffec4f0aff24712b161c54c466add12, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732123543092 2024-11-20T17:25:44,467 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting efddeee938ea40b081fe3366bca53d3a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732123543092 
2024-11-20T17:25:44,476 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#B#compaction#261 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:44,477 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/ee8b8c41bb9e4858921dcfedfc0a883e is 50, key is test_row_0/B:col10/1732123543199/Put/seqid=0 2024-11-20T17:25:44,479 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#A#compaction#262 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:44,480 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/12f9a626709742178fd9d60d742f2ba7 is 50, key is test_row_0/A:col10/1732123543199/Put/seqid=0 2024-11-20T17:25:44,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742130_1306 (size=13119) 2024-11-20T17:25:44,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742131_1307 (size=13119) 2024-11-20T17:25:44,890 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/12f9a626709742178fd9d60d742f2ba7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/12f9a626709742178fd9d60d742f2ba7 2024-11-20T17:25:44,892 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/ee8b8c41bb9e4858921dcfedfc0a883e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/ee8b8c41bb9e4858921dcfedfc0a883e 2024-11-20T17:25:44,895 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/A of 9889f1e232eeda308e27b8e2d47795bd into 12f9a626709742178fd9d60d742f2ba7(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
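With a third HFile per store after the flush, ExploringCompactionPolicy selects all three files (~36.7 K) in each of A, B and C for a minor compaction and rewrites them into a single ~12.8 K file per store. In this run the compactions are queued by MemStoreFlusher, but the same work can also be requested explicitly through the Admin API; the following is a hedged sketch of that call, with only the table name taken from the log and the connection settings assumed to be defaults.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queue a minor compaction for every store of the table; the region server's
      // CompactSplit threads pick it up asynchronously, as in the log above.
      admin.compact(table);
      // admin.majorCompact(table); // would rewrite all files regardless of selection
    }
  }
}
```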
2024-11-20T17:25:44,895 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:44,895 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/A, priority=13, startTime=1732123544465; duration=0sec 2024-11-20T17:25:44,895 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:44,895 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:A 2024-11-20T17:25:44,896 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:44,901 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:44,901 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/C is initiating minor compaction (all files) 2024-11-20T17:25:44,902 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/C in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:44,902 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/1989ab3253a64b678647472a7960a065, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/cf84b542c97c47bb958a6e3ed014804d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/4719cc4d94844f2d8d2e183c153d0e83] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=36.7 K 2024-11-20T17:25:44,902 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1989ab3253a64b678647472a7960a065, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732123540626 2024-11-20T17:25:44,903 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf84b542c97c47bb958a6e3ed014804d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732123540946 2024-11-20T17:25:44,903 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/B of 9889f1e232eeda308e27b8e2d47795bd into ee8b8c41bb9e4858921dcfedfc0a883e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:44,903 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:44,903 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/B, priority=13, startTime=1732123544465; duration=0sec 2024-11-20T17:25:44,903 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:44,903 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:B 2024-11-20T17:25:44,903 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4719cc4d94844f2d8d2e183c153d0e83, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732123543092 2024-11-20T17:25:44,911 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#C#compaction#263 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:44,911 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/329f9729418a46ddb822934ee617a94a is 50, key is test_row_0/C:col10/1732123543199/Put/seqid=0 2024-11-20T17:25:44,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742132_1308 (size=13119) 2024-11-20T17:25:44,923 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/329f9729418a46ddb822934ee617a94a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/329f9729418a46ddb822934ee617a94a 2024-11-20T17:25:44,928 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/C of 9889f1e232eeda308e27b8e2d47795bd into 329f9729418a46ddb822934ee617a94a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:44,928 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:44,928 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/C, priority=13, startTime=1732123544465; duration=0sec 2024-11-20T17:25:44,928 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:44,928 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:C 2024-11-20T17:25:45,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T17:25:45,021 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-20T17:25:45,025 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:25:45,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-20T17:25:45,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T17:25:45,027 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:25:45,028 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:25:45,028 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:25:45,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T17:25:45,180 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:45,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T17:25:45,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
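The FLUSH operations in this run originate from a client call to the master: the master stores a FlushTableProcedure (pid=79 here) and fans out a FlushRegionProcedure subprocedure to the region server hosting 9889f1e232eeda308e27b8e2d47795bd. A minimal client-side sketch of issuing such a flush via the Admin API, assuming default connection settings, would look like this:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; per the
      // "Operation: FLUSH ... procId: NN completed" lines above, HBaseAdmin
      // waits on the resulting FlushTableProcedure before returning.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```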
2024-11-20T17:25:45,180 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-20T17:25:45,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:45,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:45,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:45,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:45,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:45,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:45,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/bf825313efec43c0a066aa974a5878b5 is 50, key is test_row_0/A:col10/1732123543203/Put/seqid=0 2024-11-20T17:25:45,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742133_1309 (size=9857) 2024-11-20T17:25:45,190 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/bf825313efec43c0a066aa974a5878b5 2024-11-20T17:25:45,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/e23f5576a3024262892d54568c02884c is 50, key is test_row_0/B:col10/1732123543203/Put/seqid=0 2024-11-20T17:25:45,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742134_1310 (size=9857) 2024-11-20T17:25:45,203 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=369 (bloomFilter=true), 
to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/e23f5576a3024262892d54568c02884c 2024-11-20T17:25:45,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/a31fd71212a6481487bdc84aa6101873 is 50, key is test_row_0/C:col10/1732123543203/Put/seqid=0 2024-11-20T17:25:45,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742135_1311 (size=9857) 2024-11-20T17:25:45,215 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/a31fd71212a6481487bdc84aa6101873 2024-11-20T17:25:45,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/bf825313efec43c0a066aa974a5878b5 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/bf825313efec43c0a066aa974a5878b5 2024-11-20T17:25:45,227 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/bf825313efec43c0a066aa974a5878b5, entries=100, sequenceid=369, filesize=9.6 K 2024-11-20T17:25:45,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/e23f5576a3024262892d54568c02884c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/e23f5576a3024262892d54568c02884c 2024-11-20T17:25:45,232 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/e23f5576a3024262892d54568c02884c, entries=100, sequenceid=369, filesize=9.6 K 2024-11-20T17:25:45,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/a31fd71212a6481487bdc84aa6101873 as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/a31fd71212a6481487bdc84aa6101873 2024-11-20T17:25:45,238 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/a31fd71212a6481487bdc84aa6101873, entries=100, sequenceid=369, filesize=9.6 K 2024-11-20T17:25:45,238 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 9889f1e232eeda308e27b8e2d47795bd in 58ms, sequenceid=369, compaction requested=false 2024-11-20T17:25:45,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:45,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:45,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-20T17:25:45,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-20T17:25:45,241 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-20T17:25:45,241 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 211 msec 2024-11-20T17:25:45,242 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 216 msec 2024-11-20T17:25:45,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T17:25:45,329 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-20T17:25:45,330 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:25:45,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-20T17:25:45,331 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:25:45,332 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:25:45,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:25:45,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T17:25:45,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:45,340 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:25:45,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:45,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:45,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:45,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:45,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:45,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:45,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/b0d642f0d6da417d8a5efee39d955176 is 50, key is test_row_0/A:col10/1732123545327/Put/seqid=0 2024-11-20T17:25:45,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742136_1312 (size=12301) 2024-11-20T17:25:45,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:45,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123605375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:45,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:45,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123605375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:45,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T17:25:45,451 DEBUG [Thread-1097 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1df84068 to 127.0.0.1:56028 2024-11-20T17:25:45,451 DEBUG [Thread-1097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:45,451 DEBUG [Thread-1095 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3e5c7476 to 127.0.0.1:56028 2024-11-20T17:25:45,452 DEBUG [Thread-1095 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:45,452 DEBUG [Thread-1099 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x644774bd to 127.0.0.1:56028 2024-11-20T17:25:45,452 DEBUG [Thread-1099 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:45,453 DEBUG [Thread-1093 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7507573f to 127.0.0.1:56028 2024-11-20T17:25:45,453 DEBUG [Thread-1093 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:45,453 DEBUG [Thread-1091 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f6119e7 to 127.0.0.1:56028 2024-11-20T17:25:45,453 DEBUG [Thread-1091 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:45,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:45,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123605478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:45,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:45,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123605478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:45,483 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:45,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T17:25:45,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
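On the writing side, these rejections surface from Table.put while the region is blocked. The standard HBase client normally absorbs RegionTooBusyException with its own internal retries, so the explicit loop below is only an illustrative sketch of the back-off behaviour (it would only catch the exception if it actually propagates to the caller); the row, column family and qualifier are taken from the keys seen in this log, while the value and retry parameters are assumptions.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Region is over its memstore blocking limit; wait for a flush to drain it.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000L);
        }
      }
    }
  }
}
```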
2024-11-20T17:25:45,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:45,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:45,484 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:45,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:45,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:45,635 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:45,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T17:25:45,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:45,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
as already flushing 2024-11-20T17:25:45,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:45,636 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:45,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:45,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:45,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T17:25:45,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123605679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:45,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:45,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123605680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:45,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/b0d642f0d6da417d8a5efee39d955176 2024-11-20T17:25:45,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/a1596fb05e3c416c98c18e64ceeba50a is 50, key is test_row_0/B:col10/1732123545327/Put/seqid=0 2024-11-20T17:25:45,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742137_1313 (size=12301) 2024-11-20T17:25:45,787 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:45,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T17:25:45,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:45,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:45,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:45,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:45,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:45,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:45,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T17:25:45,940 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:45,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T17:25:45,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:45,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:45,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:45,941 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:45,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:45,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:45,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:45,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123605981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:45,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:45,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123605983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:46,092 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:46,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T17:25:46,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:46,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:46,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:46,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:46,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:46,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:46,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/a1596fb05e3c416c98c18e64ceeba50a 2024-11-20T17:25:46,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/c73a17c0d66f4f35af396aaeadc5b749 is 50, key is test_row_0/C:col10/1732123545327/Put/seqid=0 2024-11-20T17:25:46,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742138_1314 (size=12301) 2024-11-20T17:25:46,245 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:46,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T17:25:46,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:46,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:46,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:46,246 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:46,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:46,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:46,398 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:46,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T17:25:46,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:46,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:46,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:46,398 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:46,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:46,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:46,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T17:25:46,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49602 deadline: 1732123606458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:46,459 DEBUG [Thread-1088 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., hostname=d514dc944523,44015,1732123455293, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:46,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732123606461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:46,462 DEBUG [Thread-1080 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18183 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., hostname=d514dc944523,44015,1732123455293, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:46,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:46,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49642 deadline: 1732123606483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:46,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:46,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49662 deadline: 1732123606484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:46,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:25:46,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49614 deadline: 1732123606488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 2024-11-20T17:25:46,489 DEBUG [Thread-1082 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18210 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., hostname=d514dc944523,44015,1732123455293, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:25:46,550 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:46,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T17:25:46,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:46,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:46,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:46,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:25:46,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:46,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:25:46,569 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/c73a17c0d66f4f35af396aaeadc5b749 2024-11-20T17:25:46,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/b0d642f0d6da417d8a5efee39d955176 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/b0d642f0d6da417d8a5efee39d955176 2024-11-20T17:25:46,577 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/b0d642f0d6da417d8a5efee39d955176, entries=150, sequenceid=380, filesize=12.0 K 2024-11-20T17:25:46,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/a1596fb05e3c416c98c18e64ceeba50a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/a1596fb05e3c416c98c18e64ceeba50a 2024-11-20T17:25:46,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/a1596fb05e3c416c98c18e64ceeba50a, entries=150, sequenceid=380, filesize=12.0 K 2024-11-20T17:25:46,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/c73a17c0d66f4f35af396aaeadc5b749 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/c73a17c0d66f4f35af396aaeadc5b749 2024-11-20T17:25:46,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/c73a17c0d66f4f35af396aaeadc5b749, entries=150, sequenceid=380, filesize=12.0 K 2024-11-20T17:25:46,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 9889f1e232eeda308e27b8e2d47795bd in 1245ms, sequenceid=380, compaction requested=true 2024-11-20T17:25:46,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:46,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:25:46,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:46,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:25:46,586 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:46,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:46,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9889f1e232eeda308e27b8e2d47795bd:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:25:46,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:46,586 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:46,586 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35277 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:46,586 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35277 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:46,586 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/B is initiating minor compaction (all files) 2024-11-20T17:25:46,586 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/A is initiating minor compaction (all files) 2024-11-20T17:25:46,587 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/A in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:46,587 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/B in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:46,587 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/12f9a626709742178fd9d60d742f2ba7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/bf825313efec43c0a066aa974a5878b5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/b0d642f0d6da417d8a5efee39d955176] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=34.5 K 2024-11-20T17:25:46,587 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/ee8b8c41bb9e4858921dcfedfc0a883e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/e23f5576a3024262892d54568c02884c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/a1596fb05e3c416c98c18e64ceeba50a] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=34.5 K 2024-11-20T17:25:46,587 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12f9a626709742178fd9d60d742f2ba7, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732123543092 2024-11-20T17:25:46,587 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting ee8b8c41bb9e4858921dcfedfc0a883e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732123543092 2024-11-20T17:25:46,587 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf825313efec43c0a066aa974a5878b5, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732123543203 2024-11-20T17:25:46,587 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting e23f5576a3024262892d54568c02884c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732123543203 2024-11-20T17:25:46,588 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0d642f0d6da417d8a5efee39d955176, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732123545327 2024-11-20T17:25:46,588 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting a1596fb05e3c416c98c18e64ceeba50a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732123545327 2024-11-20T17:25:46,594 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#B#compaction#270 average throughput is unlimited, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:46,595 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/72fa00f710784bb0a3a24486317d1fd3 is 50, key is test_row_0/B:col10/1732123545327/Put/seqid=0 2024-11-20T17:25:46,595 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#A#compaction#271 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:46,596 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/36dda3ab383644c9a3212c3b9b8b88fb is 50, key is test_row_0/A:col10/1732123545327/Put/seqid=0 2024-11-20T17:25:46,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742139_1315 (size=13221) 2024-11-20T17:25:46,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742140_1316 (size=13221) 2024-11-20T17:25:46,703 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:46,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T17:25:46,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:46,704 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:25:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:46,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:46,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/db61bb8e09f64d389c0d398e44bcf474 is 50, key is test_row_0/A:col10/1732123545366/Put/seqid=0 2024-11-20T17:25:46,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742141_1317 (size=12301) 2024-11-20T17:25:47,005 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/36dda3ab383644c9a3212c3b9b8b88fb as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/36dda3ab383644c9a3212c3b9b8b88fb 2024-11-20T17:25:47,005 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/72fa00f710784bb0a3a24486317d1fd3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/72fa00f710784bb0a3a24486317d1fd3 2024-11-20T17:25:47,008 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/A of 9889f1e232eeda308e27b8e2d47795bd into 36dda3ab383644c9a3212c3b9b8b88fb(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:47,008 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/B of 9889f1e232eeda308e27b8e2d47795bd into 72fa00f710784bb0a3a24486317d1fd3(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:25:47,008 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:47,008 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:47,008 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/A, priority=13, startTime=1732123546585; duration=0sec 2024-11-20T17:25:47,008 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/B, priority=13, startTime=1732123546586; duration=0sec 2024-11-20T17:25:47,009 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:25:47,009 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:B 2024-11-20T17:25:47,009 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:47,009 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:A 2024-11-20T17:25:47,009 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:25:47,009 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35277 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:25:47,010 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 9889f1e232eeda308e27b8e2d47795bd/C is initiating minor compaction (all files) 2024-11-20T17:25:47,010 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9889f1e232eeda308e27b8e2d47795bd/C in TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:47,010 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/329f9729418a46ddb822934ee617a94a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/a31fd71212a6481487bdc84aa6101873, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/c73a17c0d66f4f35af396aaeadc5b749] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp, totalSize=34.5 K 2024-11-20T17:25:47,010 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 329f9729418a46ddb822934ee617a94a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1732123543092 2024-11-20T17:25:47,010 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting a31fd71212a6481487bdc84aa6101873, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1732123543203 2024-11-20T17:25:47,010 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting c73a17c0d66f4f35af396aaeadc5b749, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732123545327 2024-11-20T17:25:47,016 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9889f1e232eeda308e27b8e2d47795bd#C#compaction#273 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:25:47,016 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/6da0eb0eea3947a685305b8f5fa404d2 is 50, key is test_row_0/C:col10/1732123545327/Put/seqid=0 2024-11-20T17:25:47,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742142_1318 (size=13221) 2024-11-20T17:25:47,112 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/db61bb8e09f64d389c0d398e44bcf474 2024-11-20T17:25:47,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/e28335fe5376422dae76613de8ddfe6f is 50, key is test_row_0/B:col10/1732123545366/Put/seqid=0 2024-11-20T17:25:47,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742143_1319 (size=12301) 2024-11-20T17:25:47,424 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/6da0eb0eea3947a685305b8f5fa404d2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/6da0eb0eea3947a685305b8f5fa404d2 2024-11-20T17:25:47,428 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9889f1e232eeda308e27b8e2d47795bd/C of 9889f1e232eeda308e27b8e2d47795bd into 6da0eb0eea3947a685305b8f5fa404d2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:25:47,428 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:47,428 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd., storeName=9889f1e232eeda308e27b8e2d47795bd/C, priority=13, startTime=1732123546586; duration=0sec 2024-11-20T17:25:47,428 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:25:47,428 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9889f1e232eeda308e27b8e2d47795bd:C 2024-11-20T17:25:47,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T17:25:47,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:47,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. as already flushing 2024-11-20T17:25:47,486 DEBUG [Thread-1084 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x45426917 to 127.0.0.1:56028 2024-11-20T17:25:47,486 DEBUG [Thread-1084 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:47,489 DEBUG [Thread-1086 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e7fc60d to 127.0.0.1:56028 2024-11-20T17:25:47,489 DEBUG [Thread-1086 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:47,523 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/e28335fe5376422dae76613de8ddfe6f 2024-11-20T17:25:47,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/72bc6d140c2943c592e1d87504501039 is 50, key is test_row_0/C:col10/1732123545366/Put/seqid=0 2024-11-20T17:25:47,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742144_1320 (size=12301) 2024-11-20T17:25:47,933 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/72bc6d140c2943c592e1d87504501039 2024-11-20T17:25:47,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/db61bb8e09f64d389c0d398e44bcf474 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/db61bb8e09f64d389c0d398e44bcf474 2024-11-20T17:25:47,940 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/db61bb8e09f64d389c0d398e44bcf474, entries=150, sequenceid=406, filesize=12.0 K 2024-11-20T17:25:47,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/e28335fe5376422dae76613de8ddfe6f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/e28335fe5376422dae76613de8ddfe6f 2024-11-20T17:25:47,944 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/e28335fe5376422dae76613de8ddfe6f, entries=150, sequenceid=406, filesize=12.0 K 2024-11-20T17:25:47,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/72bc6d140c2943c592e1d87504501039 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/72bc6d140c2943c592e1d87504501039 2024-11-20T17:25:47,947 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/72bc6d140c2943c592e1d87504501039, entries=150, sequenceid=406, filesize=12.0 K 2024-11-20T17:25:47,948 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=13.42 KB/13740 for 9889f1e232eeda308e27b8e2d47795bd in 1244ms, sequenceid=406, compaction requested=false 2024-11-20T17:25:47,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:47,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
2024-11-20T17:25:47,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-20T17:25:47,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-20T17:25:47,950 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-20T17:25:47,950 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6170 sec 2024-11-20T17:25:47,952 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 2.6210 sec 2024-11-20T17:25:49,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T17:25:49,441 INFO [Thread-1090 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-20T17:25:56,486 DEBUG [Thread-1088 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e66ea50 to 127.0.0.1:56028 2024-11-20T17:25:56,487 DEBUG [Thread-1088 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:56,551 DEBUG [Thread-1080 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7792c763 to 127.0.0.1:56028 2024-11-20T17:25:56,551 DEBUG [Thread-1080 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:56,589 DEBUG [Thread-1082 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0c8a18c7 to 127.0.0.1:56028 2024-11-20T17:25:56,589 DEBUG [Thread-1082 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 16 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 25 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 130 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 124 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 31 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9017 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8742 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8647 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9065 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8745 2024-11-20T17:25:56,589 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T17:25:56,589 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:25:56,589 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72a7721c to 127.0.0.1:56028 2024-11-20T17:25:56,589 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:25:56,590 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T17:25:56,590 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T17:25:56,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:56,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T17:25:56,593 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123556593"}]},"ts":"1732123556593"} 2024-11-20T17:25:56,594 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T17:25:56,596 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T17:25:56,596 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:25:56,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=84, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9889f1e232eeda308e27b8e2d47795bd, UNASSIGN}] 2024-11-20T17:25:56,598 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=85, ppid=84, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9889f1e232eeda308e27b8e2d47795bd, UNASSIGN 2024-11-20T17:25:56,598 INFO [PEWorker-4 {}] 
assignment.RegionStateStore(202): pid=85 updating hbase:meta row=9889f1e232eeda308e27b8e2d47795bd, regionState=CLOSING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:25:56,599 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:25:56,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; CloseRegionProcedure 9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:25:56,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T17:25:56,750 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:56,751 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(124): Close 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:56,751 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:25:56,751 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1681): Closing 9889f1e232eeda308e27b8e2d47795bd, disabling compactions & flushes 2024-11-20T17:25:56,751 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:56,751 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:56,751 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. after waiting 0 ms 2024-11-20T17:25:56,751 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 
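For context, the disable sequence above (pid=83 DisableTableProcedure → pid=84 CloseTableRegionsProcedure → pid=85 TransitRegionStateProcedure UNASSIGN → pid=86 CloseRegionProcedure) is what the master runs when a client issues a disable through the Admin API, as in the "Client=jenkins//172.17.0.2 disable TestAcidGuarantees" line. Below is a minimal, hedged sketch of that client side only; the ZooKeeper quorum/port are copied from the connection strings in this log, and the snippet is illustrative rather than part of the test harness.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: quorum/port taken from the log's ZooKeeper connection string (127.0.0.1:56028).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "56028");

    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // This RPC is what produces the "disable TestAcidGuarantees" line above; the master
      // stores a DisableTableProcedure and the synchronous call waits for it to finish,
      // which is why the log shows repeated "Checking to see if procedure is done pid=83".
      if (!admin.isTableDisabled(table)) {
        admin.disableTable(table);
      }
      System.out.println("disabled = " + admin.isTableDisabled(table));
    }
  }
}
```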
2024-11-20T17:25:56,751 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(2837): Flushing 9889f1e232eeda308e27b8e2d47795bd 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T17:25:56,751 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=A 2024-11-20T17:25:56,751 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:56,751 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=B 2024-11-20T17:25:56,751 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:56,752 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9889f1e232eeda308e27b8e2d47795bd, store=C 2024-11-20T17:25:56,752 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:25:56,755 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/71c7865df58640718ace99bcd2bafa86 is 50, key is test_row_1/A:col10/1732123547489/Put/seqid=0 2024-11-20T17:25:56,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742145_1321 (size=9857) 2024-11-20T17:25:56,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T17:25:57,159 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/71c7865df58640718ace99bcd2bafa86 2024-11-20T17:25:57,165 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/89374c84b8734680b3ac74d4f39f4b85 is 50, key is test_row_1/B:col10/1732123547489/Put/seqid=0 2024-11-20T17:25:57,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742146_1322 (size=9857) 2024-11-20T17:25:57,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T17:25:57,570 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 
{event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/89374c84b8734680b3ac74d4f39f4b85 2024-11-20T17:25:57,575 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/d539d449d48445f7ac3bb6d597fff5c8 is 50, key is test_row_1/C:col10/1732123547489/Put/seqid=0 2024-11-20T17:25:57,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742147_1323 (size=9857) 2024-11-20T17:25:57,579 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=417 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/d539d449d48445f7ac3bb6d597fff5c8 2024-11-20T17:25:57,582 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/A/71c7865df58640718ace99bcd2bafa86 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/71c7865df58640718ace99bcd2bafa86 2024-11-20T17:25:57,586 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/71c7865df58640718ace99bcd2bafa86, entries=100, sequenceid=417, filesize=9.6 K 2024-11-20T17:25:57,586 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/B/89374c84b8734680b3ac74d4f39f4b85 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/89374c84b8734680b3ac74d4f39f4b85 2024-11-20T17:25:57,589 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/89374c84b8734680b3ac74d4f39f4b85, entries=100, sequenceid=417, filesize=9.6 K 2024-11-20T17:25:57,589 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/.tmp/C/d539d449d48445f7ac3bb6d597fff5c8 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d539d449d48445f7ac3bb6d597fff5c8 2024-11-20T17:25:57,592 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d539d449d48445f7ac3bb6d597fff5c8, entries=100, sequenceid=417, filesize=9.6 K 2024-11-20T17:25:57,593 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 9889f1e232eeda308e27b8e2d47795bd in 842ms, sequenceid=417, compaction requested=true 2024-11-20T17:25:57,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/0d8a536fe969488cae799164c7ce2df0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/592598c9d6184ade8195b96cc0340da3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/cd5cceaf177942eeab87eb6c63ea3e34, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8adfba8509964ebbae77a78c61ad0ceb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/78518f8b580349888c9b07ba054619ec, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/b8cc4297fabf4993a2e4b5d5236e9594, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/4567f8464d47411ca345360d092ef658, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/538d014203874a1ab2b46746aa83df78, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/888a2e970ea3401d873021539c0cbc52, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/af7a5a6d387e43fb989968e90cd6288e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8e0f283886c944b49cd5afcbe2cdffdc, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/77c775a35323417fbf0f8484bbda755d, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/17cd00c6dadd48b1931f761c9810ce9b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/2d555a824bbf42bd82d9cf7721ae00ad, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/412d3d6ebc794c2ab736911dbd185ae8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/7d94b86164c5441d9970af6b14dc3983, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/69f0169e44634e83897f485373507b54, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/478a8ddc1ac94cd8bf6d3cabfa64d4ed, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/9e557d9d06794dd5b238d6836cabefcc, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/44dec480e68c4300a0bdb205bafa2b92, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/157917d1d93149a683a763989e69d217, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/da25b58d4bb0400aaece156460cb9d6e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/5d450bda837a4959897c801e418eb53f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/12f9a626709742178fd9d60d742f2ba7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8ffec4f0aff24712b161c54c466add12, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/bf825313efec43c0a066aa974a5878b5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/b0d642f0d6da417d8a5efee39d955176] to archive 2024-11-20T17:25:57,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
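The HStore close above hands every compacted-away store file to HFileArchiver, which relocates it from the table's data directory to the same relative path under archive/ (the per-file "Archived from FileableStoreFile" lines that follow). A small sketch of how one might verify that relocation with the Hadoop FileSystem API is shown below; the fs.defaultFS address and the concrete store-file path are copied from this log, while the verification code itself is an assumption and not part of HBase.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class VerifyArchivedStoreFile {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: NameNode address taken from the hdfs:// URIs in the log.
    conf.set("fs.defaultFS", "hdfs://localhost:40219");

    // One of the A-family files listed above (first entry archived by the StoreCloser).
    String relative = "data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/"
        + "0d8a536fe969488cae799164c7ce2df0";
    String base = "/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/";

    try (FileSystem fs = FileSystem.get(conf)) {
      Path live = new Path(base + relative);                  // original store-file location
      Path archived = new Path(base + "archive/" + relative); // same relative path under archive/
      // After the HFileArchiver lines that follow, the live copy is gone and the archived copy exists.
      System.out.println("live exists     = " + fs.exists(live));
      System.out.println("archived exists = " + fs.exists(archived));
    }
  }
}
```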
2024-11-20T17:25:57,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/0d8a536fe969488cae799164c7ce2df0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/0d8a536fe969488cae799164c7ce2df0 2024-11-20T17:25:57,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/592598c9d6184ade8195b96cc0340da3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/592598c9d6184ade8195b96cc0340da3 2024-11-20T17:25:57,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/cd5cceaf177942eeab87eb6c63ea3e34 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/cd5cceaf177942eeab87eb6c63ea3e34 2024-11-20T17:25:57,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8adfba8509964ebbae77a78c61ad0ceb to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8adfba8509964ebbae77a78c61ad0ceb 2024-11-20T17:25:57,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/78518f8b580349888c9b07ba054619ec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/78518f8b580349888c9b07ba054619ec 2024-11-20T17:25:57,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/b8cc4297fabf4993a2e4b5d5236e9594 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/b8cc4297fabf4993a2e4b5d5236e9594 2024-11-20T17:25:57,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/4567f8464d47411ca345360d092ef658 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/4567f8464d47411ca345360d092ef658 2024-11-20T17:25:57,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/538d014203874a1ab2b46746aa83df78 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/538d014203874a1ab2b46746aa83df78 2024-11-20T17:25:57,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/888a2e970ea3401d873021539c0cbc52 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/888a2e970ea3401d873021539c0cbc52 2024-11-20T17:25:57,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/af7a5a6d387e43fb989968e90cd6288e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/af7a5a6d387e43fb989968e90cd6288e 2024-11-20T17:25:57,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8e0f283886c944b49cd5afcbe2cdffdc to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8e0f283886c944b49cd5afcbe2cdffdc 2024-11-20T17:25:57,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/77c775a35323417fbf0f8484bbda755d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/77c775a35323417fbf0f8484bbda755d 2024-11-20T17:25:57,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/17cd00c6dadd48b1931f761c9810ce9b to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/17cd00c6dadd48b1931f761c9810ce9b 2024-11-20T17:25:57,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/2d555a824bbf42bd82d9cf7721ae00ad to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/2d555a824bbf42bd82d9cf7721ae00ad 2024-11-20T17:25:57,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/412d3d6ebc794c2ab736911dbd185ae8 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/412d3d6ebc794c2ab736911dbd185ae8 2024-11-20T17:25:57,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/7d94b86164c5441d9970af6b14dc3983 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/7d94b86164c5441d9970af6b14dc3983 2024-11-20T17:25:57,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/69f0169e44634e83897f485373507b54 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/69f0169e44634e83897f485373507b54 2024-11-20T17:25:57,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/478a8ddc1ac94cd8bf6d3cabfa64d4ed to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/478a8ddc1ac94cd8bf6d3cabfa64d4ed 2024-11-20T17:25:57,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/9e557d9d06794dd5b238d6836cabefcc to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/9e557d9d06794dd5b238d6836cabefcc 2024-11-20T17:25:57,613 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/44dec480e68c4300a0bdb205bafa2b92 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/44dec480e68c4300a0bdb205bafa2b92 2024-11-20T17:25:57,614 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/157917d1d93149a683a763989e69d217 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/157917d1d93149a683a763989e69d217 2024-11-20T17:25:57,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/da25b58d4bb0400aaece156460cb9d6e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/da25b58d4bb0400aaece156460cb9d6e 2024-11-20T17:25:57,616 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/5d450bda837a4959897c801e418eb53f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/5d450bda837a4959897c801e418eb53f 2024-11-20T17:25:57,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/12f9a626709742178fd9d60d742f2ba7 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/12f9a626709742178fd9d60d742f2ba7 2024-11-20T17:25:57,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8ffec4f0aff24712b161c54c466add12 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/8ffec4f0aff24712b161c54c466add12 2024-11-20T17:25:57,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/bf825313efec43c0a066aa974a5878b5 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/bf825313efec43c0a066aa974a5878b5 2024-11-20T17:25:57,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/b0d642f0d6da417d8a5efee39d955176 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/b0d642f0d6da417d8a5efee39d955176 2024-11-20T17:25:57,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/9ecd303d61a04429a7127b1e9910d8ac, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/559ede58e3474cec9ceb436c40225acb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/bbeff0465780416087afd47d69c28c1b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/ea7f199cbece42f78a1059cc39518047, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/79bb65a86de94ca79dfdc917be201b1e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/49b6eb8059ce49efa18fa72ada98efd7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/7bb9bf7f86114efb8b7afd3021240577, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/6f95f9d93dcb44d4a71dd260e8f1cd4e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/462d5023b324466297da5c51cb6f4c20, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/7fb0cdd817cc468c8cd595e21aa63bef, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/4af8db9a466f4806a4a1ba5da28f7a19, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/34fec16b53e3424ebe42b450926f7970, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/2a0f8efc4b834f59bc442648037667d7, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/8b04ec2ba4be4fb2a21baaaf3ce58a90, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/3aa270a121554fc8b85b953be55e7c81, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/96103357b86643eb9a3e969b12843476, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/b7dfc262bdc6456ab9c387ebdd53954e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/133dfba96fdd479d91afab5bd0160aaf, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/6173c54dae1f49d2a16cd51ab361870f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/b031ec5b6633443397c507f2342883e1, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/f3297d025aa249df872b86b48fd11a1e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/d513ad0b92ce421dae270afcefd8db78, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/adecb9116cdc43f9a608a110a171b476, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/ee8b8c41bb9e4858921dcfedfc0a883e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/efddeee938ea40b081fe3366bca53d3a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/e23f5576a3024262892d54568c02884c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/a1596fb05e3c416c98c18e64ceeba50a] to archive 2024-11-20T17:25:57,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:25:57,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/9ecd303d61a04429a7127b1e9910d8ac to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/9ecd303d61a04429a7127b1e9910d8ac 2024-11-20T17:25:57,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/559ede58e3474cec9ceb436c40225acb to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/559ede58e3474cec9ceb436c40225acb 2024-11-20T17:25:57,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/bbeff0465780416087afd47d69c28c1b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/bbeff0465780416087afd47d69c28c1b 2024-11-20T17:25:57,624 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/ea7f199cbece42f78a1059cc39518047 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/ea7f199cbece42f78a1059cc39518047 2024-11-20T17:25:57,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/79bb65a86de94ca79dfdc917be201b1e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/79bb65a86de94ca79dfdc917be201b1e 2024-11-20T17:25:57,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/49b6eb8059ce49efa18fa72ada98efd7 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/49b6eb8059ce49efa18fa72ada98efd7 2024-11-20T17:25:57,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/7bb9bf7f86114efb8b7afd3021240577 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/7bb9bf7f86114efb8b7afd3021240577 2024-11-20T17:25:57,627 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/6f95f9d93dcb44d4a71dd260e8f1cd4e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/6f95f9d93dcb44d4a71dd260e8f1cd4e 2024-11-20T17:25:57,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/462d5023b324466297da5c51cb6f4c20 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/462d5023b324466297da5c51cb6f4c20 2024-11-20T17:25:57,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/7fb0cdd817cc468c8cd595e21aa63bef to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/7fb0cdd817cc468c8cd595e21aa63bef 2024-11-20T17:25:57,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/4af8db9a466f4806a4a1ba5da28f7a19 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/4af8db9a466f4806a4a1ba5da28f7a19 2024-11-20T17:25:57,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/34fec16b53e3424ebe42b450926f7970 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/34fec16b53e3424ebe42b450926f7970 2024-11-20T17:25:57,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/2a0f8efc4b834f59bc442648037667d7 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/2a0f8efc4b834f59bc442648037667d7 2024-11-20T17:25:57,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/8b04ec2ba4be4fb2a21baaaf3ce58a90 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/8b04ec2ba4be4fb2a21baaaf3ce58a90 2024-11-20T17:25:57,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/3aa270a121554fc8b85b953be55e7c81 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/3aa270a121554fc8b85b953be55e7c81 2024-11-20T17:25:57,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/96103357b86643eb9a3e969b12843476 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/96103357b86643eb9a3e969b12843476 2024-11-20T17:25:57,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/b7dfc262bdc6456ab9c387ebdd53954e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/b7dfc262bdc6456ab9c387ebdd53954e 2024-11-20T17:25:57,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/133dfba96fdd479d91afab5bd0160aaf to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/133dfba96fdd479d91afab5bd0160aaf 2024-11-20T17:25:57,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/6173c54dae1f49d2a16cd51ab361870f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/6173c54dae1f49d2a16cd51ab361870f 2024-11-20T17:25:57,637 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/b031ec5b6633443397c507f2342883e1 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/b031ec5b6633443397c507f2342883e1 2024-11-20T17:25:57,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/f3297d025aa249df872b86b48fd11a1e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/f3297d025aa249df872b86b48fd11a1e 2024-11-20T17:25:57,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/d513ad0b92ce421dae270afcefd8db78 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/d513ad0b92ce421dae270afcefd8db78 2024-11-20T17:25:57,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/adecb9116cdc43f9a608a110a171b476 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/adecb9116cdc43f9a608a110a171b476 2024-11-20T17:25:57,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/ee8b8c41bb9e4858921dcfedfc0a883e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/ee8b8c41bb9e4858921dcfedfc0a883e 2024-11-20T17:25:57,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/efddeee938ea40b081fe3366bca53d3a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/efddeee938ea40b081fe3366bca53d3a 2024-11-20T17:25:57,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/e23f5576a3024262892d54568c02884c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/e23f5576a3024262892d54568c02884c 2024-11-20T17:25:57,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/a1596fb05e3c416c98c18e64ceeba50a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/a1596fb05e3c416c98c18e64ceeba50a 2024-11-20T17:25:57,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/0a23ebb35f4f4b528958717712cf93cb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d3abd3236dae46e386c614afd920498c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d083acd754c34e5bb0e2231749b1c894, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/a5e5332abc554491a833f700e9afa2cf, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/bafca3295bd84d3fa21b4bb5dcca4166, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/2258778756f74c679ce8139f669bb4d3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/4d3c52eb00ea4afe9c587b90632588f0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/46b1ea963a8147c09f27a471e07cd7b6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/823991bfd679429d99d4ddedf9ee33ed, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/257d2a8166fe40598cb6a76c9911c119, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/998f57c729f846db826424f5b440be63, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/114a4cc8fbb947dda2ec886ae874de6f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/0b5323fbc13748de9f4b60cfe59d1e44, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/707273678ed14e7b9e0955902d2cb6b9, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/632204cabe6142a1b6952f839260ec1d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/c6f492708edd4a478c34336e348638d6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/66de1dd7041148208abee5b0347af120, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/bdd69299ac4d4dad93cfb65fc85e210d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/2ffd39fd83ee4b2aa378f0afcc68d180, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/16caead2a0174092bf3ad00696cbc932, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/1989ab3253a64b678647472a7960a065, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/9959ae0bcfc941a3826e65066eb7496c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/cf84b542c97c47bb958a6e3ed014804d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/329f9729418a46ddb822934ee617a94a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/4719cc4d94844f2d8d2e183c153d0e83, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/a31fd71212a6481487bdc84aa6101873, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/c73a17c0d66f4f35af396aaeadc5b749] to archive 2024-11-20T17:25:57,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:25:57,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/0a23ebb35f4f4b528958717712cf93cb to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/0a23ebb35f4f4b528958717712cf93cb 2024-11-20T17:25:57,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d3abd3236dae46e386c614afd920498c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d3abd3236dae46e386c614afd920498c 2024-11-20T17:25:57,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d083acd754c34e5bb0e2231749b1c894 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d083acd754c34e5bb0e2231749b1c894 2024-11-20T17:25:57,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/a5e5332abc554491a833f700e9afa2cf to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/a5e5332abc554491a833f700e9afa2cf 2024-11-20T17:25:57,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/bafca3295bd84d3fa21b4bb5dcca4166 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/bafca3295bd84d3fa21b4bb5dcca4166 2024-11-20T17:25:57,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/2258778756f74c679ce8139f669bb4d3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/2258778756f74c679ce8139f669bb4d3 2024-11-20T17:25:57,650 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/4d3c52eb00ea4afe9c587b90632588f0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/4d3c52eb00ea4afe9c587b90632588f0 2024-11-20T17:25:57,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/46b1ea963a8147c09f27a471e07cd7b6 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/46b1ea963a8147c09f27a471e07cd7b6 2024-11-20T17:25:57,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/823991bfd679429d99d4ddedf9ee33ed to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/823991bfd679429d99d4ddedf9ee33ed 2024-11-20T17:25:57,653 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/257d2a8166fe40598cb6a76c9911c119 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/257d2a8166fe40598cb6a76c9911c119 2024-11-20T17:25:57,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/998f57c729f846db826424f5b440be63 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/998f57c729f846db826424f5b440be63 2024-11-20T17:25:57,655 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/114a4cc8fbb947dda2ec886ae874de6f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/114a4cc8fbb947dda2ec886ae874de6f 2024-11-20T17:25:57,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/0b5323fbc13748de9f4b60cfe59d1e44 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/0b5323fbc13748de9f4b60cfe59d1e44 2024-11-20T17:25:57,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/707273678ed14e7b9e0955902d2cb6b9 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/707273678ed14e7b9e0955902d2cb6b9 2024-11-20T17:25:57,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/632204cabe6142a1b6952f839260ec1d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/632204cabe6142a1b6952f839260ec1d 2024-11-20T17:25:57,659 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/c6f492708edd4a478c34336e348638d6 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/c6f492708edd4a478c34336e348638d6 2024-11-20T17:25:57,659 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/66de1dd7041148208abee5b0347af120 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/66de1dd7041148208abee5b0347af120 2024-11-20T17:25:57,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/bdd69299ac4d4dad93cfb65fc85e210d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/bdd69299ac4d4dad93cfb65fc85e210d 2024-11-20T17:25:57,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/2ffd39fd83ee4b2aa378f0afcc68d180 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/2ffd39fd83ee4b2aa378f0afcc68d180 2024-11-20T17:25:57,663 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/16caead2a0174092bf3ad00696cbc932 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/16caead2a0174092bf3ad00696cbc932 2024-11-20T17:25:57,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/1989ab3253a64b678647472a7960a065 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/1989ab3253a64b678647472a7960a065 2024-11-20T17:25:57,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/9959ae0bcfc941a3826e65066eb7496c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/9959ae0bcfc941a3826e65066eb7496c 2024-11-20T17:25:57,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/cf84b542c97c47bb958a6e3ed014804d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/cf84b542c97c47bb958a6e3ed014804d 2024-11-20T17:25:57,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/329f9729418a46ddb822934ee617a94a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/329f9729418a46ddb822934ee617a94a 2024-11-20T17:25:57,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/4719cc4d94844f2d8d2e183c153d0e83 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/4719cc4d94844f2d8d2e183c153d0e83 2024-11-20T17:25:57,668 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/a31fd71212a6481487bdc84aa6101873 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/a31fd71212a6481487bdc84aa6101873 2024-11-20T17:25:57,669 DEBUG [StoreCloser-TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/c73a17c0d66f4f35af396aaeadc5b749 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/c73a17c0d66f4f35af396aaeadc5b749 2024-11-20T17:25:57,673 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/recovered.edits/420.seqid, newMaxSeqId=420, maxSeqId=1 2024-11-20T17:25:57,673 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd. 2024-11-20T17:25:57,673 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1635): Region close journal for 9889f1e232eeda308e27b8e2d47795bd: 2024-11-20T17:25:57,674 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(170): Closed 9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:57,675 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=85 updating hbase:meta row=9889f1e232eeda308e27b8e2d47795bd, regionState=CLOSED 2024-11-20T17:25:57,677 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-20T17:25:57,677 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; CloseRegionProcedure 9889f1e232eeda308e27b8e2d47795bd, server=d514dc944523,44015,1732123455293 in 1.0770 sec 2024-11-20T17:25:57,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=84 2024-11-20T17:25:57,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=84, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9889f1e232eeda308e27b8e2d47795bd, UNASSIGN in 1.0800 sec 2024-11-20T17:25:57,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-20T17:25:57,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.0820 sec 2024-11-20T17:25:57,679 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123557679"}]},"ts":"1732123557679"} 2024-11-20T17:25:57,680 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T17:25:57,682 INFO 
[PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T17:25:57,684 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.0930 sec 2024-11-20T17:25:57,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T17:25:57,695 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-20T17:25:57,696 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T17:25:57,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:57,697 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=87, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:57,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T17:25:57,697 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=87, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:57,699 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:57,700 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/recovered.edits] 2024-11-20T17:25:57,702 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/36dda3ab383644c9a3212c3b9b8b88fb to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/36dda3ab383644c9a3212c3b9b8b88fb 2024-11-20T17:25:57,703 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/71c7865df58640718ace99bcd2bafa86 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/71c7865df58640718ace99bcd2bafa86 2024-11-20T17:25:57,704 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/db61bb8e09f64d389c0d398e44bcf474 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/A/db61bb8e09f64d389c0d398e44bcf474 2024-11-20T17:25:57,706 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/72fa00f710784bb0a3a24486317d1fd3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/72fa00f710784bb0a3a24486317d1fd3 2024-11-20T17:25:57,707 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/89374c84b8734680b3ac74d4f39f4b85 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/89374c84b8734680b3ac74d4f39f4b85 2024-11-20T17:25:57,708 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/e28335fe5376422dae76613de8ddfe6f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/B/e28335fe5376422dae76613de8ddfe6f 2024-11-20T17:25:57,709 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/6da0eb0eea3947a685305b8f5fa404d2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/6da0eb0eea3947a685305b8f5fa404d2 2024-11-20T17:25:57,710 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/72bc6d140c2943c592e1d87504501039 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/72bc6d140c2943c592e1d87504501039 2024-11-20T17:25:57,711 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d539d449d48445f7ac3bb6d597fff5c8 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/C/d539d449d48445f7ac3bb6d597fff5c8 2024-11-20T17:25:57,714 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/recovered.edits/420.seqid to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd/recovered.edits/420.seqid 2024-11-20T17:25:57,714 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/9889f1e232eeda308e27b8e2d47795bd 2024-11-20T17:25:57,714 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T17:25:57,716 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=87, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:57,719 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T17:25:57,721 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T17:25:57,721 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=87, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:57,722 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T17:25:57,722 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732123557722"}]},"ts":"9223372036854775807"} 2024-11-20T17:25:57,723 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T17:25:57,723 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9889f1e232eeda308e27b8e2d47795bd, NAME => 'TestAcidGuarantees,,1732123523209.9889f1e232eeda308e27b8e2d47795bd.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T17:25:57,723 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-20T17:25:57,723 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732123557723"}]},"ts":"9223372036854775807"} 2024-11-20T17:25:57,725 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T17:25:57,727 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=87, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:57,727 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 31 msec 2024-11-20T17:25:57,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T17:25:57,798 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-20T17:25:57,807 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=238 (was 237) - Thread LEAK? -, OpenFileDescriptor=445 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=226 (was 258), ProcessCount=11 (was 11), AvailableMemoryMB=6174 (was 6210) 2024-11-20T17:25:57,815 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=238, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=226, ProcessCount=11, AvailableMemoryMB=6174 2024-11-20T17:25:57,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
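At this point the client has disabled and then deleted TestAcidGuarantees: DisableTableProcedure (pid=83) and DeleteTableProcedure (pid=87) are both reported completed by HBaseAdmin$TableFuture before the next test method starts. The test harness's own helper code is not visible in this log, so the snippet below is only a minimal sketch of the equivalent HBase 2.x Admin calls; the class name is hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch of the disable + delete sequence seen above (pids 83 and 87).
    public class DropTableExample {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          admin.disableTable(tn);   // DisableTableProcedure
          admin.deleteTable(tn);    // DeleteTableProcedure
        }
      }
    }
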
2024-11-20T17:25:57,817 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:25:57,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=88, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:57,818 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:25:57,818 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:57,818 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 88 2024-11-20T17:25:57,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-11-20T17:25:57,819 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:25:57,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742148_1324 (size=963) 2024-11-20T17:25:57,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-11-20T17:25:58,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-11-20T17:25:58,226 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff 2024-11-20T17:25:58,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742149_1325 (size=53) 2024-11-20T17:25:58,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-11-20T17:25:58,632 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:25:58,632 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 464447c196464f9bd851ab5282adadec, disabling compactions & flushes 2024-11-20T17:25:58,632 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:25:58,632 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:25:58,632 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. after waiting 0 ms 2024-11-20T17:25:58,632 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:25:58,632 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
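The create request above carries the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three otherwise default column families A, B and C with VERSIONS => '1'; the earlier TableDescriptorChecker warning comes from the deliberately tiny MEMSTORE_FLUSHSIZE of 131072 bytes. The sketch below shows how such a descriptor could be assembled with the plain HBase 2.x builder API; the test itself goes through its own utility code, and the class name here is hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTableExample {
      public static void main(String[] args) throws IOException {
        TableDescriptorBuilder tdb = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata shown in the create request above.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
            // 128 KB flush size: intentionally small, hence the TableDescriptorChecker warning.
            .setMemStoreFlushSize(131072);
        for (String family : new String[] {"A", "B", "C"}) {
          tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)   // VERSIONS => '1'
              .build());
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(tdb.build());
        }
      }
    }
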
2024-11-20T17:25:58,632 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:25:58,633 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:25:58,633 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732123558633"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123558633"}]},"ts":"1732123558633"} 2024-11-20T17:25:58,634 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:25:58,635 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:25:58,635 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123558635"}]},"ts":"1732123558635"} 2024-11-20T17:25:58,635 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T17:25:58,639 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=464447c196464f9bd851ab5282adadec, ASSIGN}] 2024-11-20T17:25:58,640 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=464447c196464f9bd851ab5282adadec, ASSIGN 2024-11-20T17:25:58,640 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=464447c196464f9bd851ab5282adadec, ASSIGN; state=OFFLINE, location=d514dc944523,44015,1732123455293; forceNewPlan=false, retain=false 2024-11-20T17:25:58,791 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=464447c196464f9bd851ab5282adadec, regionState=OPENING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:25:58,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; OpenRegionProcedure 464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:25:58,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-11-20T17:25:58,943 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:25:58,946 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:25:58,946 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(7285): Opening region: {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:25:58,947 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 464447c196464f9bd851ab5282adadec 2024-11-20T17:25:58,947 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:25:58,947 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(7327): checking encryption for 464447c196464f9bd851ab5282adadec 2024-11-20T17:25:58,947 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(7330): checking classloading for 464447c196464f9bd851ab5282adadec 2024-11-20T17:25:58,948 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 464447c196464f9bd851ab5282adadec 2024-11-20T17:25:58,949 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:25:58,949 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 464447c196464f9bd851ab5282adadec columnFamilyName A 2024-11-20T17:25:58,949 DEBUG [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:58,949 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.HStore(327): Store=464447c196464f9bd851ab5282adadec/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:25:58,950 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 464447c196464f9bd851ab5282adadec 2024-11-20T17:25:58,950 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:25:58,950 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 464447c196464f9bd851ab5282adadec columnFamilyName B 2024-11-20T17:25:58,950 DEBUG [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:58,951 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.HStore(327): Store=464447c196464f9bd851ab5282adadec/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:25:58,951 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 464447c196464f9bd851ab5282adadec 2024-11-20T17:25:58,952 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:25:58,952 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 464447c196464f9bd851ab5282adadec columnFamilyName C 2024-11-20T17:25:58,952 DEBUG [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:25:58,952 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.HStore(327): Store=464447c196464f9bd851ab5282adadec/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:25:58,952 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:25:58,953 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec 2024-11-20T17:25:58,953 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec 2024-11-20T17:25:58,954 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:25:58,955 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1085): writing seq id for 464447c196464f9bd851ab5282adadec 2024-11-20T17:25:58,956 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:25:58,957 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1102): Opened 464447c196464f9bd851ab5282adadec; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70351521, jitterRate=0.04831935465335846}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:25:58,957 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1001): Region open journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:25:58,958 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., pid=90, masterSystemTime=1732123558943 2024-11-20T17:25:58,959 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:25:58,959 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
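With the region open, each of the three stores is backed by a CompactingMemStore with a 2.00 MB in-memory flush threshold and the ADAPTIVE compactor, which is what the table-level attribute requests. For reference only, the same policy can also be asked for per column family through the 2.x builder API; this is not what the test does (it relies on the table-level key), and the class name below is hypothetical.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Per-family alternative to the table-level 'hbase.hregion.compacting.memstore.type' attribute.
    public class AdaptiveFamilyExample {
      public static void main(String[] args) {
        ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
        System.out.println(cfA);
      }
    }
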
2024-11-20T17:25:58,959 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=464447c196464f9bd851ab5282adadec, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:25:58,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-20T17:25:58,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; OpenRegionProcedure 464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 in 168 msec 2024-11-20T17:25:58,962 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=89, resume processing ppid=88 2024-11-20T17:25:58,962 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, ppid=88, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=464447c196464f9bd851ab5282adadec, ASSIGN in 322 msec 2024-11-20T17:25:58,963 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:25:58,963 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123558963"}]},"ts":"1732123558963"} 2024-11-20T17:25:58,964 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T17:25:58,966 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:25:58,967 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1490 sec 2024-11-20T17:25:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-11-20T17:25:59,922 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 88 completed 2024-11-20T17:25:59,923 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4dacfd49 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5271608e 2024-11-20T17:25:59,927 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f9fed4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:25:59,928 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:25:59,930 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56318, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:25:59,931 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:25:59,932 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46502, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:25:59,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T17:25:59,933 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:25:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T17:25:59,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742150_1326 (size=999) 2024-11-20T17:26:00,344 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-20T17:26:00,344 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-20T17:26:00,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:26:00,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=464447c196464f9bd851ab5282adadec, REOPEN/MOVE}] 2024-11-20T17:26:00,348 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=464447c196464f9bd851ab5282adadec, REOPEN/MOVE 2024-11-20T17:26:00,349 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=464447c196464f9bd851ab5282adadec, regionState=CLOSING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:26:00,350 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:26:00,350 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; CloseRegionProcedure 464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:26:00,501 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:00,501 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(124): Close 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,501 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:26:00,501 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1681): Closing 464447c196464f9bd851ab5282adadec, disabling compactions & flushes 2024-11-20T17:26:00,501 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:00,501 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:00,501 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. after waiting 0 ms 2024-11-20T17:26:00,501 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
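The modify request just above changes only column family A, turning it into a MOB family with IS_MOB => 'true' and MOB_THRESHOLD => '4'; the ReopenTableRegionsProcedure then closes and reopens the region so the new descriptor takes effect. Below is a hedged sketch of the corresponding client-side change using the plain 2.x API rather than the test's own utilities; the class name is hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyAExample {
      public static void main(String[] args) throws IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor current = admin.getDescriptor(tn);
          TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                  .setMobEnabled(true)     // IS_MOB => 'true'
                  .setMobThreshold(4L)     // MOB_THRESHOLD => '4'
                  .build())
              .build();
          admin.modifyTable(modified);     // drives ModifyTableProcedure and the region reopen
        }
      }
    }
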
2024-11-20T17:26:00,505 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T17:26:00,505 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:00,505 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1635): Region close journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:00,505 WARN [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionServer(3786): Not adding moved region record: 464447c196464f9bd851ab5282adadec to self. 2024-11-20T17:26:00,507 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(170): Closed 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,507 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=464447c196464f9bd851ab5282adadec, regionState=CLOSED 2024-11-20T17:26:00,509 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-20T17:26:00,509 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseRegionProcedure 464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 in 158 msec 2024-11-20T17:26:00,509 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=464447c196464f9bd851ab5282adadec, REOPEN/MOVE; state=CLOSED, location=d514dc944523,44015,1732123455293; forceNewPlan=false, retain=true 2024-11-20T17:26:00,660 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=464447c196464f9bd851ab5282adadec, regionState=OPENING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:26:00,661 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=93, state=RUNNABLE; OpenRegionProcedure 464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:26:00,812 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:00,815 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:00,815 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(7285): Opening region: {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:26:00,816 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,816 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:26:00,816 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(7327): checking encryption for 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,816 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(7330): checking classloading for 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,817 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,818 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:26:00,818 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 464447c196464f9bd851ab5282adadec columnFamilyName A 2024-11-20T17:26:00,819 DEBUG [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:00,819 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.HStore(327): Store=464447c196464f9bd851ab5282adadec/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:26:00,820 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,820 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:26:00,820 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 464447c196464f9bd851ab5282adadec columnFamilyName B 2024-11-20T17:26:00,820 DEBUG [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:00,821 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.HStore(327): Store=464447c196464f9bd851ab5282adadec/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:26:00,821 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,821 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:26:00,821 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 464447c196464f9bd851ab5282adadec columnFamilyName C 2024-11-20T17:26:00,821 DEBUG [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:00,822 INFO [StoreOpener-464447c196464f9bd851ab5282adadec-1 {}] regionserver.HStore(327): Store=464447c196464f9bd851ab5282adadec/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:26:00,822 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:00,822 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,823 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,824 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:26:00,825 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1085): writing seq id for 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,825 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1102): Opened 464447c196464f9bd851ab5282adadec; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65462344, jitterRate=-0.024535059928894043}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:26:00,826 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1001): Region open journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:00,827 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., pid=95, masterSystemTime=1732123560812 2024-11-20T17:26:00,828 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:00,828 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:00,828 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=464447c196464f9bd851ab5282adadec, regionState=OPEN, openSeqNum=5, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:26:00,830 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=93 2024-11-20T17:26:00,830 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; OpenRegionProcedure 464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 in 168 msec 2024-11-20T17:26:00,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-11-20T17:26:00,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=464447c196464f9bd851ab5282adadec, REOPEN/MOVE in 483 msec 2024-11-20T17:26:00,833 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-20T17:26:00,833 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 486 msec 2024-11-20T17:26:00,834 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 899 msec 2024-11-20T17:26:00,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T17:26:00,836 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1157d18a to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2076b3ad 2024-11-20T17:26:00,841 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c40db2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:00,842 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x353bcb3d to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@20c5edec 2024-11-20T17:26:00,845 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a86cb71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:00,845 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x77b5b03d to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@789089aa 2024-11-20T17:26:00,848 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3401188a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:00,849 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x15bd9063 to 
127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@699c96a7 2024-11-20T17:26:00,852 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55650656, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:00,853 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c0ec341 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@712a5bc9 2024-11-20T17:26:00,855 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c80a40c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:00,856 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6b660061 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62cf69c5 2024-11-20T17:26:00,858 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5910b8c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:00,859 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x45ad0ff5 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c35c7c4 2024-11-20T17:26:00,861 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f9a05, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:00,862 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x28dc77ab to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a259e93 2024-11-20T17:26:00,864 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26b6d860, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:00,865 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70304ef6 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6179765 2024-11-20T17:26:00,867 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16722a1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:00,868 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x0f8ea360 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3518b14b 2024-11-20T17:26:00,870 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@323d4725, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:00,873 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:00,873 DEBUG [hconnection-0x404e4d5c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:00,873 DEBUG [hconnection-0x74e1600c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:00,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees 2024-11-20T17:26:00,874 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56330, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:00,874 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56340, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:00,874 DEBUG [hconnection-0x6117c1e8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:00,875 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:00,875 DEBUG [hconnection-0x512dbd84-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:00,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:26:00,875 DEBUG [hconnection-0x79f23f58-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:00,875 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56346, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:00,876 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:00,876 DEBUG [hconnection-0x775a5337-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:00,876 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:00,876 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:00,876 DEBUG [hconnection-0x3be3382a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:00,876 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:00,877 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:00,877 DEBUG [hconnection-0x169ace45-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:00,878 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56388, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:00,880 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:00,880 DEBUG [hconnection-0x15bd9c22-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:00,880 DEBUG [hconnection-0x6121c855-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:00,881 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56394, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:00,881 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56404, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:00,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:26:00,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:00,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:00,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:00,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:00,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:00,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:00,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:00,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123620914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:00,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:00,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123620914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:00,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:00,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123620920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:00,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:00,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123620921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:00,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:00,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123620921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:00,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209d06091883214ec5919fac33a1fd8d9b_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123560892/Put/seqid=0 2024-11-20T17:26:00,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742151_1327 (size=12154) 2024-11-20T17:26:00,962 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:00,966 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209d06091883214ec5919fac33a1fd8d9b_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209d06091883214ec5919fac33a1fd8d9b_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:00,967 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/1afa8c2b7a58493d845ae91e6666b337, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:00,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/1afa8c2b7a58493d845ae91e6666b337 is 175, key is test_row_0/A:col10/1732123560892/Put/seqid=0 2024-11-20T17:26:00,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:26:00,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742152_1328 (size=30955) 2024-11-20T17:26:01,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123621021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123621021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123621023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,027 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:01,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123621028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T17:26:01,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:01,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:01,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:01,029 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:01,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:01,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:01,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123621028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:26:01,181 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:01,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T17:26:01,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:01,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:01,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:01,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:01,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:01,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:01,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123621225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123621226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123621226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123621230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123621232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,335 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:01,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T17:26:01,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:01,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:01,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:01,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:01,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:01,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:01,382 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/1afa8c2b7a58493d845ae91e6666b337 2024-11-20T17:26:01,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/4afa4bb516634f3f9be4a317e441ebfd is 50, key is test_row_0/B:col10/1732123560892/Put/seqid=0 2024-11-20T17:26:01,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742153_1329 (size=12001) 2024-11-20T17:26:01,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/4afa4bb516634f3f9be4a317e441ebfd 2024-11-20T17:26:01,439 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/e4a3a4f75a20455980578a0e306205ba is 50, key is test_row_0/C:col10/1732123560892/Put/seqid=0 2024-11-20T17:26:01,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742154_1330 (size=12001) 2024-11-20T17:26:01,445 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/e4a3a4f75a20455980578a0e306205ba 2024-11-20T17:26:01,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/1afa8c2b7a58493d845ae91e6666b337 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1afa8c2b7a58493d845ae91e6666b337 2024-11-20T17:26:01,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1afa8c2b7a58493d845ae91e6666b337, entries=150, sequenceid=16, filesize=30.2 K 2024-11-20T17:26:01,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/4afa4bb516634f3f9be4a317e441ebfd as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/4afa4bb516634f3f9be4a317e441ebfd 2024-11-20T17:26:01,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/4afa4bb516634f3f9be4a317e441ebfd, entries=150, sequenceid=16, filesize=11.7 K 2024-11-20T17:26:01,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/e4a3a4f75a20455980578a0e306205ba as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/e4a3a4f75a20455980578a0e306205ba 2024-11-20T17:26:01,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/e4a3a4f75a20455980578a0e306205ba, entries=150, sequenceid=16, filesize=11.7 K 2024-11-20T17:26:01,468 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 464447c196464f9bd851ab5282adadec in 569ms, sequenceid=16, compaction requested=false 2024-11-20T17:26:01,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:01,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:26:01,488 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:01,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-11-20T17:26:01,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:01,489 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:26:01,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:01,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:01,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:01,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:01,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:01,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:01,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e47764e19bbb40afbfbc22dfa22d5b92_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123560912/Put/seqid=0 2024-11-20T17:26:01,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742155_1331 (size=12154) 2024-11-20T17:26:01,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:01,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:01,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123621535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123621537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123621538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123621541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123621541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123621642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123621642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123621644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123621645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123621648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123621847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123621848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123621849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123621850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:01,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123621852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:01,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:01,912 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e47764e19bbb40afbfbc22dfa22d5b92_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e47764e19bbb40afbfbc22dfa22d5b92_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:01,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/25891e63ca9f43468432918f6d6530b2, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:01,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/25891e63ca9f43468432918f6d6530b2 is 175, key is test_row_0/A:col10/1732123560912/Put/seqid=0 2024-11-20T17:26:01,920 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742156_1332 (size=30955) 2024-11-20T17:26:01,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:26:02,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:02,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123622153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:02,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:02,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123622154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:02,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:02,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123622154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:02,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:02,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123622155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:02,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:02,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123622159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:02,321 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/25891e63ca9f43468432918f6d6530b2 2024-11-20T17:26:02,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/653dd3c5018042ad89bab6fdd9df579b is 50, key is test_row_0/B:col10/1732123560912/Put/seqid=0 2024-11-20T17:26:02,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742157_1333 (size=12001) 2024-11-20T17:26:02,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:02,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123622658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:02,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:02,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123622659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:02,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:02,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123622660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:02,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:02,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123622660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:02,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:02,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123622664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:02,732 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/653dd3c5018042ad89bab6fdd9df579b 2024-11-20T17:26:02,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/cb9b6108db804fa685035d6f4eb01da0 is 50, key is test_row_0/C:col10/1732123560912/Put/seqid=0 2024-11-20T17:26:02,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742158_1334 (size=12001) 2024-11-20T17:26:02,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:26:03,143 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/cb9b6108db804fa685035d6f4eb01da0 2024-11-20T17:26:03,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/25891e63ca9f43468432918f6d6530b2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/25891e63ca9f43468432918f6d6530b2 2024-11-20T17:26:03,150 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/25891e63ca9f43468432918f6d6530b2, entries=150, sequenceid=42, filesize=30.2 K 2024-11-20T17:26:03,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/653dd3c5018042ad89bab6fdd9df579b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/653dd3c5018042ad89bab6fdd9df579b 2024-11-20T17:26:03,155 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/653dd3c5018042ad89bab6fdd9df579b, entries=150, sequenceid=42, filesize=11.7 K 2024-11-20T17:26:03,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/cb9b6108db804fa685035d6f4eb01da0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cb9b6108db804fa685035d6f4eb01da0 2024-11-20T17:26:03,159 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cb9b6108db804fa685035d6f4eb01da0, entries=150, sequenceid=42, filesize=11.7 K 2024-11-20T17:26:03,160 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 464447c196464f9bd851ab5282adadec in 1671ms, sequenceid=42, compaction requested=false 2024-11-20T17:26:03,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:03,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:03,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=97 2024-11-20T17:26:03,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=97 2024-11-20T17:26:03,162 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-11-20T17:26:03,163 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2850 sec 2024-11-20T17:26:03,164 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees in 2.2900 sec 2024-11-20T17:26:03,206 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T17:26:03,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:03,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:26:03,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:03,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:03,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:03,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:03,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:03,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:03,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203e84ce0d96814594a1777bb68b254f4d_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123561539/Put/seqid=0 2024-11-20T17:26:03,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742159_1335 (size=14594) 2024-11-20T17:26:03,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:03,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123623691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:03,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:03,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123623691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:03,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:03,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123623694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:03,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:03,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123623696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:03,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:03,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123623697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:03,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:03,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123623798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:03,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:03,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123623798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:03,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:03,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123623798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:03,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:03,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123623801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:03,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:03,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123623803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123624005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123624005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123624005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123624006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123624009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,084 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:04,088 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203e84ce0d96814594a1777bb68b254f4d_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203e84ce0d96814594a1777bb68b254f4d_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:04,089 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/3b2f005803a447b6bc2a80b525307e7b, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:04,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/3b2f005803a447b6bc2a80b525307e7b is 175, key is test_row_0/A:col10/1732123561539/Put/seqid=0 2024-11-20T17:26:04,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742160_1336 (size=39549) 2024-11-20T17:26:04,093 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/3b2f005803a447b6bc2a80b525307e7b 2024-11-20T17:26:04,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/137f4f340e194854a0b8f613bce1fcea is 50, key is test_row_0/B:col10/1732123561539/Put/seqid=0 2024-11-20T17:26:04,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742161_1337 
(size=12001) 2024-11-20T17:26:04,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123624310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123624314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123624314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123624315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123624316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/137f4f340e194854a0b8f613bce1fcea 2024-11-20T17:26:04,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/3ec30834a9a24e86a1cf91f73318a083 is 50, key is test_row_0/C:col10/1732123561539/Put/seqid=0 2024-11-20T17:26:04,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742162_1338 (size=12001) 2024-11-20T17:26:04,515 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/3ec30834a9a24e86a1cf91f73318a083 2024-11-20T17:26:04,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/3b2f005803a447b6bc2a80b525307e7b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3b2f005803a447b6bc2a80b525307e7b 2024-11-20T17:26:04,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3b2f005803a447b6bc2a80b525307e7b, entries=200, sequenceid=54, filesize=38.6 K 2024-11-20T17:26:04,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/137f4f340e194854a0b8f613bce1fcea as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/137f4f340e194854a0b8f613bce1fcea 2024-11-20T17:26:04,529 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/137f4f340e194854a0b8f613bce1fcea, entries=150, sequenceid=54, filesize=11.7 K 2024-11-20T17:26:04,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/3ec30834a9a24e86a1cf91f73318a083 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3ec30834a9a24e86a1cf91f73318a083 2024-11-20T17:26:04,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3ec30834a9a24e86a1cf91f73318a083, entries=150, sequenceid=54, filesize=11.7 K 2024-11-20T17:26:04,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 464447c196464f9bd851ab5282adadec in 869ms, sequenceid=54, compaction requested=true 2024-11-20T17:26:04,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:04,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:04,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:04,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:04,535 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:04,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:04,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:04,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:04,536 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:04,536 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:04,537 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:04,537 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/A is initiating minor compaction (all files) 2024-11-20T17:26:04,537 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/B is initiating minor compaction (all files) 2024-11-20T17:26:04,537 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/B in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:04,537 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/A in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:04,537 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1afa8c2b7a58493d845ae91e6666b337, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/25891e63ca9f43468432918f6d6530b2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3b2f005803a447b6bc2a80b525307e7b] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=99.1 K 2024-11-20T17:26:04,537 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/4afa4bb516634f3f9be4a317e441ebfd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/653dd3c5018042ad89bab6fdd9df579b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/137f4f340e194854a0b8f613bce1fcea] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=35.2 K 2024-11-20T17:26:04,537 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:04,537 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1afa8c2b7a58493d845ae91e6666b337, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/25891e63ca9f43468432918f6d6530b2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3b2f005803a447b6bc2a80b525307e7b] 2024-11-20T17:26:04,537 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 4afa4bb516634f3f9be4a317e441ebfd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732123560892 2024-11-20T17:26:04,537 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1afa8c2b7a58493d845ae91e6666b337, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732123560892 2024-11-20T17:26:04,538 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 653dd3c5018042ad89bab6fdd9df579b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732123560912 2024-11-20T17:26:04,538 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25891e63ca9f43468432918f6d6530b2, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732123560912 2024-11-20T17:26:04,538 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 137f4f340e194854a0b8f613bce1fcea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732123561536 2024-11-20T17:26:04,538 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b2f005803a447b6bc2a80b525307e7b, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732123561536 2024-11-20T17:26:04,544 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:04,545 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#B#compaction#288 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:04,545 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/5a3bf1bc9c2e47518f02295ef8481704 is 50, key is test_row_0/B:col10/1732123561539/Put/seqid=0 2024-11-20T17:26:04,548 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120c70b05fe1381429a8b78ef1c12794e75_464447c196464f9bd851ab5282adadec store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:04,550 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120c70b05fe1381429a8b78ef1c12794e75_464447c196464f9bd851ab5282adadec, store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:04,550 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c70b05fe1381429a8b78ef1c12794e75_464447c196464f9bd851ab5282adadec because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:04,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742163_1339 (size=12104) 2024-11-20T17:26:04,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742164_1340 (size=4469) 2024-11-20T17:26:04,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:04,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:26:04,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:04,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:04,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:04,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:04,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:04,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:04,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207eb04df96d4d4b6e8c47d07c4271aef6_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123563690/Put/seqid=0 2024-11-20T17:26:04,830 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742165_1341 (size=14594) 2024-11-20T17:26:04,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123624827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123624827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123624828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123624834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123624834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123624936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123624936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123624936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123624941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:04,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123624942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:04,963 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/5a3bf1bc9c2e47518f02295ef8481704 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/5a3bf1bc9c2e47518f02295ef8481704 2024-11-20T17:26:04,967 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 464447c196464f9bd851ab5282adadec/B of 464447c196464f9bd851ab5282adadec into 5a3bf1bc9c2e47518f02295ef8481704(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:04,967 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:04,967 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/B, priority=13, startTime=1732123564535; duration=0sec 2024-11-20T17:26:04,967 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#A#compaction#289 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:04,967 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:04,967 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:B 2024-11-20T17:26:04,967 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:04,968 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/0e2835adfc704198a348ce4f35bb6d95 is 175, key is test_row_0/A:col10/1732123561539/Put/seqid=0 2024-11-20T17:26:04,968 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:04,968 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/C is initiating minor compaction (all files) 2024-11-20T17:26:04,968 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/C in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:04,968 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/e4a3a4f75a20455980578a0e306205ba, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cb9b6108db804fa685035d6f4eb01da0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3ec30834a9a24e86a1cf91f73318a083] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=35.2 K 2024-11-20T17:26:04,969 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting e4a3a4f75a20455980578a0e306205ba, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732123560892 2024-11-20T17:26:04,969 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting cb9b6108db804fa685035d6f4eb01da0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732123560912 2024-11-20T17:26:04,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742166_1342 (size=31058) 2024-11-20T17:26:04,974 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ec30834a9a24e86a1cf91f73318a083, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732123561536 
2024-11-20T17:26:04,978 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/0e2835adfc704198a348ce4f35bb6d95 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/0e2835adfc704198a348ce4f35bb6d95 2024-11-20T17:26:04,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-11-20T17:26:04,980 INFO [Thread-1492 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-11-20T17:26:04,981 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#C#compaction#291 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:04,981 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/c42f107ddeab49f8a25d99fb879130eb is 50, key is test_row_0/C:col10/1732123561539/Put/seqid=0 2024-11-20T17:26:04,982 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:04,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees 2024-11-20T17:26:04,984 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:04,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-20T17:26:04,985 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:04,985 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:04,985 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 464447c196464f9bd851ab5282adadec/A of 464447c196464f9bd851ab5282adadec into 0e2835adfc704198a348ce4f35bb6d95(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:04,985 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:04,985 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/A, priority=13, startTime=1732123564535; duration=0sec 2024-11-20T17:26:04,985 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:04,985 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:A 2024-11-20T17:26:04,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742167_1343 (size=12104) 2024-11-20T17:26:04,998 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/c42f107ddeab49f8a25d99fb879130eb as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/c42f107ddeab49f8a25d99fb879130eb 2024-11-20T17:26:05,002 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 464447c196464f9bd851ab5282adadec/C of 464447c196464f9bd851ab5282adadec into c42f107ddeab49f8a25d99fb879130eb(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:05,002 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:05,002 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/C, priority=13, startTime=1732123564536; duration=0sec 2024-11-20T17:26:05,002 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:05,002 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:C 2024-11-20T17:26:05,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-20T17:26:05,136 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:05,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-20T17:26:05,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:05,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:05,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:05,137 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:05,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123625141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123625141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123625142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123625146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123625147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,230 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:05,234 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207eb04df96d4d4b6e8c47d07c4271aef6_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207eb04df96d4d4b6e8c47d07c4271aef6_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:05,235 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/2eac3ef816594a859147e3c78333ba6c, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:05,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/2eac3ef816594a859147e3c78333ba6c is 175, key is test_row_0/A:col10/1732123563690/Put/seqid=0 2024-11-20T17:26:05,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742168_1344 (size=39549) 2024-11-20T17:26:05,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-20T17:26:05,289 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:05,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-20T17:26:05,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:05,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:05,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:05,290 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:05,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,442 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:05,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-20T17:26:05,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:05,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
as already flushing 2024-11-20T17:26:05,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:05,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123625447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123625448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123625448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123625452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123625452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-20T17:26:05,600 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:05,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-20T17:26:05,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:05,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:05,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:05,601 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,640 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=80, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/2eac3ef816594a859147e3c78333ba6c 2024-11-20T17:26:05,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/6ba8823798824fa1b89fee429b21fffa is 50, key is test_row_0/B:col10/1732123563690/Put/seqid=0 2024-11-20T17:26:05,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742169_1345 (size=12001) 2024-11-20T17:26:05,752 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:05,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-20T17:26:05,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:05,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
as already flushing 2024-11-20T17:26:05,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:05,753 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,905 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:05,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-20T17:26:05,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:05,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:05,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:05,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:05,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123625953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123625955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123625955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123625956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:05,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:05,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123625957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:06,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/6ba8823798824fa1b89fee429b21fffa 2024-11-20T17:26:06,058 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:06,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-20T17:26:06,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:06,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:06,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:06,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:06,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:06,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/2f9d6a5d96274f768fb26bbf23260946 is 50, key is test_row_0/C:col10/1732123563690/Put/seqid=0 2024-11-20T17:26:06,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:06,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742170_1346 (size=12001) 2024-11-20T17:26:06,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-20T17:26:06,211 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:06,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-20T17:26:06,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:06,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:06,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:06,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:06,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:06,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:06,363 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:06,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-20T17:26:06,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:06,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:06,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:06,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:06,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:06,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:06,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/2f9d6a5d96274f768fb26bbf23260946 2024-11-20T17:26:06,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/2eac3ef816594a859147e3c78333ba6c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2eac3ef816594a859147e3c78333ba6c 2024-11-20T17:26:06,473 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2eac3ef816594a859147e3c78333ba6c, entries=200, sequenceid=80, filesize=38.6 K 2024-11-20T17:26:06,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/6ba8823798824fa1b89fee429b21fffa as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/6ba8823798824fa1b89fee429b21fffa 2024-11-20T17:26:06,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/6ba8823798824fa1b89fee429b21fffa, entries=150, sequenceid=80, 
filesize=11.7 K 2024-11-20T17:26:06,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/2f9d6a5d96274f768fb26bbf23260946 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/2f9d6a5d96274f768fb26bbf23260946 2024-11-20T17:26:06,482 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/2f9d6a5d96274f768fb26bbf23260946, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T17:26:06,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 464447c196464f9bd851ab5282adadec in 1665ms, sequenceid=80, compaction requested=false 2024-11-20T17:26:06,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:06,516 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:06,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-11-20T17:26:06,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:06,517 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T17:26:06,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:06,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:06,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:06,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:06,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:06,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:06,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112066ca4fc33fe5403f9ab7ec3a54773f27_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123564827/Put/seqid=0 2024-11-20T17:26:06,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742171_1347 (size=12154) 2024-11-20T17:26:06,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:06,942 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112066ca4fc33fe5403f9ab7ec3a54773f27_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112066ca4fc33fe5403f9ab7ec3a54773f27_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:06,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/3e73c14ea152485faa7aa07ad88b5a8c, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:06,944 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/3e73c14ea152485faa7aa07ad88b5a8c is 175, key is test_row_0/A:col10/1732123564827/Put/seqid=0 2024-11-20T17:26:06,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742172_1348 (size=30955) 2024-11-20T17:26:06,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:06,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:06,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:06,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123626982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:06,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:06,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123626983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:06,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:06,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123626986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:06,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:06,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123626987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:06,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:06,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123626987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-20T17:26:07,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123627089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123627089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123627091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123627091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123627092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123627293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123627293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123627296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123627297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123627297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,348 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/3e73c14ea152485faa7aa07ad88b5a8c 2024-11-20T17:26:07,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/f3aca11a37a14345b49a2624f196f6b4 is 50, key is test_row_0/B:col10/1732123564827/Put/seqid=0 2024-11-20T17:26:07,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742173_1349 (size=12001) 2024-11-20T17:26:07,362 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/f3aca11a37a14345b49a2624f196f6b4 2024-11-20T17:26:07,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/8c4825d4324a4aad9b9124c78bbae2e8 is 50, key is test_row_0/C:col10/1732123564827/Put/seqid=0 2024-11-20T17:26:07,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742174_1350 (size=12001) 2024-11-20T17:26:07,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123627597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123627598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123627601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123627603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:07,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123627604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:07,773 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/8c4825d4324a4aad9b9124c78bbae2e8 2024-11-20T17:26:07,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/3e73c14ea152485faa7aa07ad88b5a8c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3e73c14ea152485faa7aa07ad88b5a8c 2024-11-20T17:26:07,782 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3e73c14ea152485faa7aa07ad88b5a8c, entries=150, sequenceid=93, filesize=30.2 K 2024-11-20T17:26:07,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/f3aca11a37a14345b49a2624f196f6b4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/f3aca11a37a14345b49a2624f196f6b4 2024-11-20T17:26:07,786 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/f3aca11a37a14345b49a2624f196f6b4, entries=150, sequenceid=93, filesize=11.7 K 2024-11-20T17:26:07,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/8c4825d4324a4aad9b9124c78bbae2e8 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/8c4825d4324a4aad9b9124c78bbae2e8 2024-11-20T17:26:07,790 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/8c4825d4324a4aad9b9124c78bbae2e8, entries=150, sequenceid=93, filesize=11.7 K 2024-11-20T17:26:07,791 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 464447c196464f9bd851ab5282adadec in 1274ms, sequenceid=93, compaction requested=true 2024-11-20T17:26:07,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:07,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:07,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=99 2024-11-20T17:26:07,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=99 2024-11-20T17:26:07,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-20T17:26:07,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8070 sec 2024-11-20T17:26:07,795 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees in 2.8120 sec 2024-11-20T17:26:08,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:08,108 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T17:26:08,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:08,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:08,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:08,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:08,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:08,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:08,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123628112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a19a632a66e34e8db315e5c9ecf2624c_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123568108/Put/seqid=0 2024-11-20T17:26:08,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123628114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123628114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123628115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123628116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742175_1351 (size=12154) 2024-11-20T17:26:08,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123628219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123628220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,224 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123628220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,224 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123628220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123628425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123628425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123628425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123628426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,522 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:08,526 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a19a632a66e34e8db315e5c9ecf2624c_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a19a632a66e34e8db315e5c9ecf2624c_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:08,527 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/96376774f09d4d6495107ab04a0c608c, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:08,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/96376774f09d4d6495107ab04a0c608c is 175, key is test_row_0/A:col10/1732123568108/Put/seqid=0 2024-11-20T17:26:08,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742176_1352 (size=30955) 2024-11-20T17:26:08,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123628728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123628728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123628729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:08,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123628731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:08,942 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=121, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/96376774f09d4d6495107ab04a0c608c 2024-11-20T17:26:08,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/28bbc2a2f9234b73aa306bff4fafe594 is 50, key is test_row_0/B:col10/1732123568108/Put/seqid=0 2024-11-20T17:26:08,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742177_1353 (size=12001) 2024-11-20T17:26:08,956 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/28bbc2a2f9234b73aa306bff4fafe594 2024-11-20T17:26:08,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/cf5e2b6004c348a3bf3570efb0895d1d is 50, key is test_row_0/C:col10/1732123568108/Put/seqid=0 2024-11-20T17:26:08,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742178_1354 (size=12001) 2024-11-20T17:26:08,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/cf5e2b6004c348a3bf3570efb0895d1d 2024-11-20T17:26:08,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/96376774f09d4d6495107ab04a0c608c as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/96376774f09d4d6495107ab04a0c608c 2024-11-20T17:26:08,986 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/96376774f09d4d6495107ab04a0c608c, entries=150, sequenceid=121, filesize=30.2 K 2024-11-20T17:26:08,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/28bbc2a2f9234b73aa306bff4fafe594 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/28bbc2a2f9234b73aa306bff4fafe594 2024-11-20T17:26:08,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/28bbc2a2f9234b73aa306bff4fafe594, entries=150, sequenceid=121, filesize=11.7 K 2024-11-20T17:26:08,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/cf5e2b6004c348a3bf3570efb0895d1d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cf5e2b6004c348a3bf3570efb0895d1d 2024-11-20T17:26:08,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cf5e2b6004c348a3bf3570efb0895d1d, entries=150, sequenceid=121, filesize=11.7 K 2024-11-20T17:26:08,998 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 464447c196464f9bd851ab5282adadec in 890ms, sequenceid=121, compaction requested=true 2024-11-20T17:26:08,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:08,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:08,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:08,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:08,999 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:08,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:08,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:08,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:08,999 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:09,000 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:09,000 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/B is initiating minor compaction (all files) 2024-11-20T17:26:09,000 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/B in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:09,000 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/5a3bf1bc9c2e47518f02295ef8481704, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/6ba8823798824fa1b89fee429b21fffa, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/f3aca11a37a14345b49a2624f196f6b4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/28bbc2a2f9234b73aa306bff4fafe594] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=47.0 K 2024-11-20T17:26:09,001 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a3bf1bc9c2e47518f02295ef8481704, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732123561536 2024-11-20T17:26:09,001 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132517 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:09,001 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ba8823798824fa1b89fee429b21fffa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732123563690 2024-11-20T17:26:09,001 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/A is initiating minor compaction (all files) 2024-11-20T17:26:09,001 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/A 
in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:09,001 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/0e2835adfc704198a348ce4f35bb6d95, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2eac3ef816594a859147e3c78333ba6c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3e73c14ea152485faa7aa07ad88b5a8c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/96376774f09d4d6495107ab04a0c608c] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=129.4 K 2024-11-20T17:26:09,001 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:09,002 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting f3aca11a37a14345b49a2624f196f6b4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732123564827 2024-11-20T17:26:09,002 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/0e2835adfc704198a348ce4f35bb6d95, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2eac3ef816594a859147e3c78333ba6c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3e73c14ea152485faa7aa07ad88b5a8c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/96376774f09d4d6495107ab04a0c608c] 2024-11-20T17:26:09,002 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 28bbc2a2f9234b73aa306bff4fafe594, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732123566979 2024-11-20T17:26:09,002 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e2835adfc704198a348ce4f35bb6d95, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732123561536 2024-11-20T17:26:09,002 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2eac3ef816594a859147e3c78333ba6c, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732123563690 2024-11-20T17:26:09,002 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e73c14ea152485faa7aa07ad88b5a8c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732123564827 2024-11-20T17:26:09,003 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96376774f09d4d6495107ab04a0c608c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732123566979 2024-11-20T17:26:09,011 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#B#compaction#300 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:09,012 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/83ad4bcafc1f48dbb264c461d4793f50 is 50, key is test_row_0/B:col10/1732123568108/Put/seqid=0 2024-11-20T17:26:09,013 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:09,016 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120e5ee41352f2f4775a00208186b35be09_464447c196464f9bd851ab5282adadec store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:09,018 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120e5ee41352f2f4775a00208186b35be09_464447c196464f9bd851ab5282adadec, store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:09,018 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e5ee41352f2f4775a00208186b35be09_464447c196464f9bd851ab5282adadec because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:09,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742179_1355 (size=12241) 2024-11-20T17:26:09,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742180_1356 (size=4469) 2024-11-20T17:26:09,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-20T17:26:09,090 INFO [Thread-1492 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-11-20T17:26:09,091 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:09,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-11-20T17:26:09,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T17:26:09,092 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:09,093 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:09,093 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:09,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:09,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:26:09,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:09,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:09,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:09,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:09,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:09,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:09,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112068299829fdbc46daa531938666f5e3e3_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123568109/Put/seqid=0 2024-11-20T17:26:09,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742181_1357 (size=14744) 2024-11-20T17:26:09,145 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:09,149 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112068299829fdbc46daa531938666f5e3e3_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112068299829fdbc46daa531938666f5e3e3_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:09,150 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/ce95f502d1eb40869ed76cd9aecbc255, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:09,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/ce95f502d1eb40869ed76cd9aecbc255 is 175, key is test_row_0/A:col10/1732123568109/Put/seqid=0 2024-11-20T17:26:09,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742182_1358 (size=39699) 2024-11-20T17:26:09,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T17:26:09,244 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:09,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T17:26:09,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:09,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:09,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:09,246 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:09,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:09,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:09,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123629246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123629247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123629248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123629249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123629252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123629353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123629354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123629355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123629356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123629356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T17:26:09,398 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:09,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T17:26:09,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:09,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:09,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:09,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:09,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:09,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:09,424 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/83ad4bcafc1f48dbb264c461d4793f50 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/83ad4bcafc1f48dbb264c461d4793f50 2024-11-20T17:26:09,424 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#A#compaction#301 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:09,425 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/4e3a63ce8fb04b4b84285ad597192378 is 175, key is test_row_0/A:col10/1732123568108/Put/seqid=0 2024-11-20T17:26:09,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742183_1359 (size=31195) 2024-11-20T17:26:09,430 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/B of 464447c196464f9bd851ab5282adadec into 83ad4bcafc1f48dbb264c461d4793f50(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:09,430 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:09,430 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/B, priority=12, startTime=1732123568999; duration=0sec 2024-11-20T17:26:09,430 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:09,430 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:B 2024-11-20T17:26:09,430 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:09,431 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:09,431 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/C is initiating minor compaction (all files) 2024-11-20T17:26:09,431 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/C in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:09,431 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/c42f107ddeab49f8a25d99fb879130eb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/2f9d6a5d96274f768fb26bbf23260946, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/8c4825d4324a4aad9b9124c78bbae2e8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cf5e2b6004c348a3bf3570efb0895d1d] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=47.0 K 2024-11-20T17:26:09,432 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting c42f107ddeab49f8a25d99fb879130eb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732123561536 2024-11-20T17:26:09,432 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f9d6a5d96274f768fb26bbf23260946, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732123563690 2024-11-20T17:26:09,433 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c4825d4324a4aad9b9124c78bbae2e8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=93, earliestPutTs=1732123564827 2024-11-20T17:26:09,433 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting cf5e2b6004c348a3bf3570efb0895d1d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732123566979 2024-11-20T17:26:09,441 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#C#compaction#303 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:09,441 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/fc27ad8e58df43b190e5dd4aa0ce43a3 is 50, key is test_row_0/C:col10/1732123568108/Put/seqid=0 2024-11-20T17:26:09,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742184_1360 (size=12241) 2024-11-20T17:26:09,551 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:09,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T17:26:09,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:09,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:09,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:09,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:09,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:09,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:09,557 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/ce95f502d1eb40869ed76cd9aecbc255 2024-11-20T17:26:09,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123629560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123629561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123629562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/8013bb69446441f6817773fd890d0717 is 50, key is test_row_0/B:col10/1732123568109/Put/seqid=0 2024-11-20T17:26:09,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123629562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123629562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742185_1361 (size=12101) 2024-11-20T17:26:09,569 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/8013bb69446441f6817773fd890d0717 2024-11-20T17:26:09,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/347f30ace34146099199d171b3409319 is 50, key is test_row_0/C:col10/1732123568109/Put/seqid=0 2024-11-20T17:26:09,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742186_1362 (size=12101) 2024-11-20T17:26:09,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/347f30ace34146099199d171b3409319 2024-11-20T17:26:09,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/ce95f502d1eb40869ed76cd9aecbc255 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/ce95f502d1eb40869ed76cd9aecbc255 2024-11-20T17:26:09,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/ce95f502d1eb40869ed76cd9aecbc255, entries=200, sequenceid=132, filesize=38.8 K 2024-11-20T17:26:09,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/8013bb69446441f6817773fd890d0717 as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/8013bb69446441f6817773fd890d0717 2024-11-20T17:26:09,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/8013bb69446441f6817773fd890d0717, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T17:26:09,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/347f30ace34146099199d171b3409319 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/347f30ace34146099199d171b3409319 2024-11-20T17:26:09,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/347f30ace34146099199d171b3409319, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T17:26:09,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 464447c196464f9bd851ab5282adadec in 467ms, sequenceid=132, compaction requested=false 2024-11-20T17:26:09,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:09,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T17:26:09,703 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:09,704 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T17:26:09,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:09,704 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:26:09,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:09,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:09,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:09,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:09,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:09,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:09,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112085c8150f91624837bf542acae4841b72_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123569239/Put/seqid=0 2024-11-20T17:26:09,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742187_1363 (size=12304) 2024-11-20T17:26:09,834 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/4e3a63ce8fb04b4b84285ad597192378 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/4e3a63ce8fb04b4b84285ad597192378 2024-11-20T17:26:09,840 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/A of 464447c196464f9bd851ab5282adadec into 4e3a63ce8fb04b4b84285ad597192378(size=30.5 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:09,840 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:09,840 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/A, priority=12, startTime=1732123568999; duration=0sec 2024-11-20T17:26:09,840 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:09,840 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:A 2024-11-20T17:26:09,851 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/fc27ad8e58df43b190e5dd4aa0ce43a3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fc27ad8e58df43b190e5dd4aa0ce43a3 2024-11-20T17:26:09,857 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/C of 464447c196464f9bd851ab5282adadec into fc27ad8e58df43b190e5dd4aa0ce43a3(size=12.0 K), total size for store is 23.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:09,857 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:09,857 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/C, priority=12, startTime=1732123568999; duration=0sec 2024-11-20T17:26:09,857 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:09,857 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:C 2024-11-20T17:26:09,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:09,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:09,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123629871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123629876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123629876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123629877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123629877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123629978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123629985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123629986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123629986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:09,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:09,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123629987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:10,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:10,122 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112085c8150f91624837bf542acae4841b72_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112085c8150f91624837bf542acae4841b72_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:10,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/1e185227ac70463e9e9366127b5ac9cb, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:10,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/1e185227ac70463e9e9366127b5ac9cb is 175, key is test_row_0/A:col10/1732123569239/Put/seqid=0 2024-11-20T17:26:10,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742188_1364 (size=31105) 2024-11-20T17:26:10,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:10,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123630181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:10,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:10,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123630189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:10,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:10,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123630189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:10,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:10,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123630189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:10,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T17:26:10,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:10,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123630192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:10,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:10,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123630489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:10,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:10,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123630496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:10,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:10,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123630496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:10,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:10,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123630498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:10,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123630498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:10,528 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/1e185227ac70463e9e9366127b5ac9cb 2024-11-20T17:26:10,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/57aa07db4ca048ceb9ad7f8a133cab63 is 50, key is test_row_0/B:col10/1732123569239/Put/seqid=0 2024-11-20T17:26:10,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742189_1365 (size=12151) 2024-11-20T17:26:10,939 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/57aa07db4ca048ceb9ad7f8a133cab63 2024-11-20T17:26:10,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/09df6c4db9c24b5fad558621fe243911 is 50, key is test_row_0/C:col10/1732123569239/Put/seqid=0 2024-11-20T17:26:10,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742190_1366 (size=12151) 2024-11-20T17:26:11,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123630996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:11,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:11,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123631001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:11,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:11,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123631001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:11,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:11,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123631002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:11,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:11,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123631003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:11,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T17:26:11,352 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/09df6c4db9c24b5fad558621fe243911 2024-11-20T17:26:11,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/1e185227ac70463e9e9366127b5ac9cb as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1e185227ac70463e9e9366127b5ac9cb 2024-11-20T17:26:11,361 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1e185227ac70463e9e9366127b5ac9cb, entries=150, sequenceid=158, filesize=30.4 K 2024-11-20T17:26:11,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/57aa07db4ca048ceb9ad7f8a133cab63 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/57aa07db4ca048ceb9ad7f8a133cab63 2024-11-20T17:26:11,365 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/57aa07db4ca048ceb9ad7f8a133cab63, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T17:26:11,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/09df6c4db9c24b5fad558621fe243911 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/09df6c4db9c24b5fad558621fe243911 2024-11-20T17:26:11,369 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/09df6c4db9c24b5fad558621fe243911, entries=150, sequenceid=158, filesize=11.9 K 2024-11-20T17:26:11,370 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 464447c196464f9bd851ab5282adadec in 1666ms, sequenceid=158, compaction requested=true 2024-11-20T17:26:11,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:11,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:11,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-20T17:26:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-11-20T17:26:11,373 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-20T17:26:11,373 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2790 sec 2024-11-20T17:26:11,374 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 2.2820 sec 2024-11-20T17:26:12,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:12,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:26:12,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:12,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:12,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:12,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:12,011 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:12,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:12,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112056e04aaa2d7e44549491177a0b7b0e3e_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123572009/Put/seqid=0 2024-11-20T17:26:12,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742191_1367 (size=14794) 2024-11-20T17:26:12,022 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:12,025 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112056e04aaa2d7e44549491177a0b7b0e3e_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112056e04aaa2d7e44549491177a0b7b0e3e_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:12,026 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/e72479e5ed404345baa91844fdb00d87, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:12,027 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/e72479e5ed404345baa91844fdb00d87 is 175, key is test_row_0/A:col10/1732123572009/Put/seqid=0 2024-11-20T17:26:12,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742192_1368 (size=39749) 2024-11-20T17:26:12,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123632037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123632037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123632041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123632042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123632042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123632143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123632143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123632148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123632148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123632148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123632346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123632349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123632351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123632352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123632354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,432 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/e72479e5ed404345baa91844fdb00d87 2024-11-20T17:26:12,439 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/89762bab8e0443d8ad6301670262910a is 50, key is test_row_0/B:col10/1732123572009/Put/seqid=0 2024-11-20T17:26:12,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742193_1369 (size=12151) 2024-11-20T17:26:12,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/89762bab8e0443d8ad6301670262910a 2024-11-20T17:26:12,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/94d5d9cc5d644646ab108cdfaf8bfe58 is 50, key is test_row_0/C:col10/1732123572009/Put/seqid=0 2024-11-20T17:26:12,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742194_1370 (size=12151) 2024-11-20T17:26:12,654 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123632652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123632652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123632655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123632655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:12,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123632659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:12,856 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/94d5d9cc5d644646ab108cdfaf8bfe58 2024-11-20T17:26:12,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/e72479e5ed404345baa91844fdb00d87 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/e72479e5ed404345baa91844fdb00d87 2024-11-20T17:26:12,865 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/e72479e5ed404345baa91844fdb00d87, entries=200, sequenceid=172, filesize=38.8 K 2024-11-20T17:26:12,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/89762bab8e0443d8ad6301670262910a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/89762bab8e0443d8ad6301670262910a 2024-11-20T17:26:12,874 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/89762bab8e0443d8ad6301670262910a, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T17:26:12,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/94d5d9cc5d644646ab108cdfaf8bfe58 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/94d5d9cc5d644646ab108cdfaf8bfe58 2024-11-20T17:26:12,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/94d5d9cc5d644646ab108cdfaf8bfe58, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T17:26:12,879 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 464447c196464f9bd851ab5282adadec in 869ms, sequenceid=172, compaction requested=true 2024-11-20T17:26:12,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:12,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:12,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:12,879 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:12,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:12,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:12,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:12,879 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:12,879 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:12,880 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141748 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:12,880 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:12,881 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/A is initiating minor compaction (all files) 2024-11-20T17:26:12,881 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/B is initiating minor compaction (all files) 2024-11-20T17:26:12,881 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/A in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:12,881 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/B in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:12,881 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/4e3a63ce8fb04b4b84285ad597192378, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/ce95f502d1eb40869ed76cd9aecbc255, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1e185227ac70463e9e9366127b5ac9cb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/e72479e5ed404345baa91844fdb00d87] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=138.4 K 2024-11-20T17:26:12,881 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/83ad4bcafc1f48dbb264c461d4793f50, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/8013bb69446441f6817773fd890d0717, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/57aa07db4ca048ceb9ad7f8a133cab63, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/89762bab8e0443d8ad6301670262910a] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=47.5 K 2024-11-20T17:26:12,881 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:12,881 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/4e3a63ce8fb04b4b84285ad597192378, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/ce95f502d1eb40869ed76cd9aecbc255, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1e185227ac70463e9e9366127b5ac9cb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/e72479e5ed404345baa91844fdb00d87] 2024-11-20T17:26:12,881 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 83ad4bcafc1f48dbb264c461d4793f50, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732123566979 2024-11-20T17:26:12,881 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e3a63ce8fb04b4b84285ad597192378, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732123566979 2024-11-20T17:26:12,882 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 8013bb69446441f6817773fd890d0717, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732123568109 2024-11-20T17:26:12,882 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce95f502d1eb40869ed76cd9aecbc255, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732123568109 2024-11-20T17:26:12,882 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e185227ac70463e9e9366127b5ac9cb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732123569239 2024-11-20T17:26:12,882 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 57aa07db4ca048ceb9ad7f8a133cab63, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732123569239 2024-11-20T17:26:12,882 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting e72479e5ed404345baa91844fdb00d87, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123569868 2024-11-20T17:26:12,882 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 89762bab8e0443d8ad6301670262910a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123569868 2024-11-20T17:26:12,890 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:12,895 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#B#compaction#313 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:12,896 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/7c304f43e53a46f09cb8adcb1c88aad5 is 50, key is test_row_0/B:col10/1732123572009/Put/seqid=0 2024-11-20T17:26:12,901 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120b39ad4a9ec974174beb0cf00c1992ed5_464447c196464f9bd851ab5282adadec store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:12,903 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120b39ad4a9ec974174beb0cf00c1992ed5_464447c196464f9bd851ab5282adadec, store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:12,904 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b39ad4a9ec974174beb0cf00c1992ed5_464447c196464f9bd851ab5282adadec because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:12,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742195_1371 (size=12527) 2024-11-20T17:26:12,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742196_1372 (size=4469) 2024-11-20T17:26:12,914 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#A#compaction#312 average throughput is 1.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:12,919 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/f3c4d299b6364bf6a18275dc1eba9d80 is 175, key is test_row_0/A:col10/1732123572009/Put/seqid=0 2024-11-20T17:26:12,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742197_1373 (size=31481) 2024-11-20T17:26:12,928 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/f3c4d299b6364bf6a18275dc1eba9d80 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/f3c4d299b6364bf6a18275dc1eba9d80 2024-11-20T17:26:12,933 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/A of 464447c196464f9bd851ab5282adadec into f3c4d299b6364bf6a18275dc1eba9d80(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:12,933 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:12,933 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/A, priority=12, startTime=1732123572879; duration=0sec 2024-11-20T17:26:12,933 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:12,933 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:A 2024-11-20T17:26:12,933 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:12,935 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:12,935 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/C is initiating minor compaction (all files) 2024-11-20T17:26:12,935 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/C in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:12,935 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fc27ad8e58df43b190e5dd4aa0ce43a3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/347f30ace34146099199d171b3409319, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/09df6c4db9c24b5fad558621fe243911, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/94d5d9cc5d644646ab108cdfaf8bfe58] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=47.5 K 2024-11-20T17:26:12,935 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc27ad8e58df43b190e5dd4aa0ce43a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732123566979 2024-11-20T17:26:12,935 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 347f30ace34146099199d171b3409319, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732123568109 2024-11-20T17:26:12,936 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09df6c4db9c24b5fad558621fe243911, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732123569239 2024-11-20T17:26:12,936 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94d5d9cc5d644646ab108cdfaf8bfe58, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123569868 2024-11-20T17:26:12,944 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#C#compaction#314 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:12,945 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/a3d8ed6541174f4dbc50858ac7ab18db is 50, key is test_row_0/C:col10/1732123572009/Put/seqid=0 2024-11-20T17:26:12,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742198_1374 (size=12527) 2024-11-20T17:26:12,955 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/a3d8ed6541174f4dbc50858ac7ab18db as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/a3d8ed6541174f4dbc50858ac7ab18db 2024-11-20T17:26:12,959 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/C of 464447c196464f9bd851ab5282adadec into a3d8ed6541174f4dbc50858ac7ab18db(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:12,959 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:12,959 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/C, priority=12, startTime=1732123572879; duration=0sec 2024-11-20T17:26:12,959 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:12,960 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:C 2024-11-20T17:26:13,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:13,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T17:26:13,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:13,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:13,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:13,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:13,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:13,161 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:13,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123633166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123633167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123633168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209ac201faa9254e71bc063ad2d16ea280_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123572041/Put/seqid=0 2024-11-20T17:26:13,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123633169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123633169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742199_1375 (size=17284) 2024-11-20T17:26:13,175 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,179 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209ac201faa9254e71bc063ad2d16ea280_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209ac201faa9254e71bc063ad2d16ea280_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:13,180 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/34d0ccaead5341bd81daa00557488d39, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:13,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/34d0ccaead5341bd81daa00557488d39 is 175, key is test_row_0/A:col10/1732123572041/Put/seqid=0 2024-11-20T17:26:13,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742200_1376 (size=48389) 2024-11-20T17:26:13,190 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=201, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/34d0ccaead5341bd81daa00557488d39 2024-11-20T17:26:13,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T17:26:13,197 INFO [Thread-1492 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed 
2024-11-20T17:26:13,198 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:13,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-11-20T17:26:13,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/6c17b76b756241be9bb142a1883f1f18 is 50, key is test_row_0/B:col10/1732123572041/Put/seqid=0 2024-11-20T17:26:13,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T17:26:13,199 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:13,201 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:13,201 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:13,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742201_1377 (size=12151) 2024-11-20T17:26:13,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123633270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123633271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123633271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123633274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123633275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T17:26:13,310 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/7c304f43e53a46f09cb8adcb1c88aad5 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/7c304f43e53a46f09cb8adcb1c88aad5 2024-11-20T17:26:13,314 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/B of 464447c196464f9bd851ab5282adadec into 7c304f43e53a46f09cb8adcb1c88aad5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:13,314 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:13,314 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/B, priority=12, startTime=1732123572879; duration=0sec 2024-11-20T17:26:13,314 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:13,314 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:B 2024-11-20T17:26:13,357 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:13,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T17:26:13,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:13,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:13,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:13,358 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:13,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:13,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:13,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123633474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123633474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123633476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123633480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123633482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T17:26:13,510 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:13,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T17:26:13,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:13,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:13,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:13,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
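[Editor's note] The repeated WARN/RegionTooBusyException entries above show Mutate calls being rejected once the region's memstore passes its 512.0 K blocking limit. The HBase client normally retries these calls on its own; the sketch below is only an application-level illustration (row value, backoff numbers, and retry bound are assumptions, and depending on client retry settings the exception may also arrive wrapped rather than directly):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // Row key and family/qualifier mirror the log (test_row_0, A:col10); the value is made up.
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 8; attempt++) { // assumed retry bound
                    try {
                        table.put(put);
                        return;
                    } catch (RegionTooBusyException e) {
                        // Memstore is above its blocking limit; give the flusher time to catch up.
                        Thread.sleep(backoffMs);
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
                throw new IllegalStateException("region still too busy after retries");
            }
        }
    }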
2024-11-20T17:26:13,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:13,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:13,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/6c17b76b756241be9bb142a1883f1f18 2024-11-20T17:26:13,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/c4ed123cac804c0cada63601576f1dca is 50, key is test_row_0/C:col10/1732123572041/Put/seqid=0 2024-11-20T17:26:13,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742202_1378 (size=12151) 2024-11-20T17:26:13,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/c4ed123cac804c0cada63601576f1dca 2024-11-20T17:26:13,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/34d0ccaead5341bd81daa00557488d39 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/34d0ccaead5341bd81daa00557488d39 2024-11-20T17:26:13,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/34d0ccaead5341bd81daa00557488d39, entries=250, sequenceid=201, filesize=47.3 K 2024-11-20T17:26:13,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/6c17b76b756241be9bb142a1883f1f18 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/6c17b76b756241be9bb142a1883f1f18 2024-11-20T17:26:13,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,637 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/6c17b76b756241be9bb142a1883f1f18, entries=150, sequenceid=201, filesize=11.9 K 2024-11-20T17:26:13,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/c4ed123cac804c0cada63601576f1dca as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/c4ed123cac804c0cada63601576f1dca 2024-11-20T17:26:13,641 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/c4ed123cac804c0cada63601576f1dca, entries=150, sequenceid=201, filesize=11.9 K 2024-11-20T17:26:13,642 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 464447c196464f9bd851ab5282adadec in 482ms, sequenceid=201, compaction requested=false 2024-11-20T17:26:13,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:13,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
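[Editor's note] The 512.0 K blocking limit in the warnings and the flush that finally completes above ("Finished flush of dataSize ~161.02 KB ... in 482ms, sequenceid=201") are governed by the memstore flush size and its blocking multiplier. The shipped defaults are far larger (128 MB flush size, multiplier 4), so the small limit here suggests the test lowers the flush size deliberately. A minimal sketch of the two properties, with assumed test-style values chosen so that flush.size x multiplier reproduces the 512 K limit seen in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches this many bytes (default 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // 128 KB, assumed value
            // Block new updates once the memstore reaches flush.size * multiplier (default 4);
            // with the values above that is the 512 K limit reported in the warnings.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println(conf.get("hbase.hregion.memstore.flush.size"));
        }
    }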
2024-11-20T17:26:13,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T17:26:13,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
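[Editor's note] The long run of "instantiating StoreFileTracker impl ... DefaultStoreFileTracker" DEBUG lines comes from StoreFileTrackerFactory resolving a tracker for each store access; DefaultStoreFileTracker is the directory-listing implementation. If memory serves (treat the property name as an assumption), the tracker can be selected per table or cluster-wide via hbase.store.file-tracker.impl, for example:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TrackerChoice {
        public static void main(String[] args) {
            // Assumed property name "hbase.store.file-tracker.impl"; values include
            // DEFAULT (directory listing, as in this log) and FILE (tracker-file based).
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("A")))
                .setValue("hbase.store.file-tracker.impl", "FILE")
                .build();
            System.out.println(td.getValue("hbase.store.file-tracker.impl"));
        }
    }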
2024-11-20T17:26:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,663 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:13,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T17:26:13,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:13,663 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T17:26:13,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:13,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:13,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:13,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:13,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:13,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:13,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ae862555a0984231aca385f711248a9a_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123573168/Put/seqid=0 2024-11-20T17:26:13,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742203_1379 (size=9814) 2024-11-20T17:26:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,698 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ae862555a0984231aca385f711248a9a_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ae862555a0984231aca385f711248a9a_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:13,698 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T17:26:13,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/b0e25c87eb14428091a5bff177a63438, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:13,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/b0e25c87eb14428091a5bff177a63438 is 175, key is test_row_0/A:col10/1732123573168/Put/seqid=0 2024-11-20T17:26:13,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742204_1380 (size=22461) 2024-11-20T17:26:13,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,719 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=212, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/b0e25c87eb14428091a5bff177a63438 2024-11-20T17:26:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,725 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/fbfd6f141cf9485cbe7b8b2e037ca723 is 50, key is test_row_0/B:col10/1732123573168/Put/seqid=0 2024-11-20T17:26:13,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742205_1381 (size=9757) 2024-11-20T17:26:13,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:13,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
as already flushing 2024-11-20T17:26:13,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T17:26:13,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,807 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,812 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,817 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,824 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,835 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:13,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123633867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123633868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123633872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123633873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123633873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123633979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123633980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123633980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123633980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:13,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:13,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123633982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:14,149 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/fbfd6f141cf9485cbe7b8b2e037ca723 2024-11-20T17:26:14,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/4afcc638660a44adb3c1384efcfc6417 is 50, key is test_row_0/C:col10/1732123573168/Put/seqid=0 2024-11-20T17:26:14,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742206_1382 (size=9757) 2024-11-20T17:26:14,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:14,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123634186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:14,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:14,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123634187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:14,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:14,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123634187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:14,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:14,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123634188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:14,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:14,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123634188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:14,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T17:26:14,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:14,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123634491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:14,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:14,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123634491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:14,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:14,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123634493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:14,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:14,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123634493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:14,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:14,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123634494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:14,560 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/4afcc638660a44adb3c1384efcfc6417 2024-11-20T17:26:14,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/b0e25c87eb14428091a5bff177a63438 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/b0e25c87eb14428091a5bff177a63438 2024-11-20T17:26:14,568 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/b0e25c87eb14428091a5bff177a63438, entries=100, sequenceid=212, filesize=21.9 K 2024-11-20T17:26:14,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/fbfd6f141cf9485cbe7b8b2e037ca723 as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/fbfd6f141cf9485cbe7b8b2e037ca723 2024-11-20T17:26:14,572 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/fbfd6f141cf9485cbe7b8b2e037ca723, entries=100, sequenceid=212, filesize=9.5 K 2024-11-20T17:26:14,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/4afcc638660a44adb3c1384efcfc6417 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/4afcc638660a44adb3c1384efcfc6417 2024-11-20T17:26:14,576 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/4afcc638660a44adb3c1384efcfc6417, entries=100, sequenceid=212, filesize=9.5 K 2024-11-20T17:26:14,578 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 464447c196464f9bd851ab5282adadec in 915ms, sequenceid=212, compaction requested=true 2024-11-20T17:26:14,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:14,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:14,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-20T17:26:14,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-11-20T17:26:14,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-20T17:26:14,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3780 sec 2024-11-20T17:26:14,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 1.3830 sec 2024-11-20T17:26:14,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:14,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T17:26:15,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:15,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:15,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123635001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120228041722efa483bb9f0d5c6a2209846_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123574998/Put/seqid=0 2024-11-20T17:26:15,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123635005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123635005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123635006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742207_1383 (size=17284) 2024-11-20T17:26:15,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123635006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123635107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123635112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123635113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123635113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123635114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T17:26:15,303 INFO [Thread-1492 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-20T17:26:15,305 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:15,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-20T17:26:15,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T17:26:15,306 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:15,306 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:15,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:15,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123635311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123635316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123635317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123635317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123635318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T17:26:15,414 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,418 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120228041722efa483bb9f0d5c6a2209846_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120228041722efa483bb9f0d5c6a2209846_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:15,419 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/51192d115ecf4f4a8eddf28df13d0bab, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:15,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/51192d115ecf4f4a8eddf28df13d0bab is 175, key is test_row_0/A:col10/1732123574998/Put/seqid=0 2024-11-20T17:26:15,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added 
to blk_1073742208_1384 (size=48389) 2024-11-20T17:26:15,425 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=241, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/51192d115ecf4f4a8eddf28df13d0bab 2024-11-20T17:26:15,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/2f54f91c87db49b38677203dc402c0f9 is 50, key is test_row_0/B:col10/1732123574998/Put/seqid=0 2024-11-20T17:26:15,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742209_1385 (size=12151) 2024-11-20T17:26:15,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/2f54f91c87db49b38677203dc402c0f9 2024-11-20T17:26:15,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/fcfe20a664954197a31dee138b94963c is 50, key is test_row_0/C:col10/1732123574998/Put/seqid=0 2024-11-20T17:26:15,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742210_1386 (size=12151) 2024-11-20T17:26:15,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/fcfe20a664954197a31dee138b94963c 2024-11-20T17:26:15,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/51192d115ecf4f4a8eddf28df13d0bab as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/51192d115ecf4f4a8eddf28df13d0bab 2024-11-20T17:26:15,458 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:15,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T17:26:15,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:15,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
as already flushing 2024-11-20T17:26:15,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:15,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:15,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:15,459 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/51192d115ecf4f4a8eddf28df13d0bab, entries=250, sequenceid=241, filesize=47.3 K 2024-11-20T17:26:15,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:15,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/2f54f91c87db49b38677203dc402c0f9 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/2f54f91c87db49b38677203dc402c0f9 2024-11-20T17:26:15,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/2f54f91c87db49b38677203dc402c0f9, entries=150, sequenceid=241, filesize=11.9 K 2024-11-20T17:26:15,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/fcfe20a664954197a31dee138b94963c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fcfe20a664954197a31dee138b94963c 2024-11-20T17:26:15,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fcfe20a664954197a31dee138b94963c, entries=150, sequenceid=241, filesize=11.9 K 2024-11-20T17:26:15,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize 
~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 464447c196464f9bd851ab5282adadec in 470ms, sequenceid=241, compaction requested=true 2024-11-20T17:26:15,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:15,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:15,470 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:15,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:15,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:15,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:15,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:15,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:26:15,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,470 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:15,471 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 150720 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:15,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,471 DEBUG 
[RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/A is initiating minor compaction (all files) 2024-11-20T17:26:15,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,471 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/A in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:15,471 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/f3c4d299b6364bf6a18275dc1eba9d80, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/34d0ccaead5341bd81daa00557488d39, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/b0e25c87eb14428091a5bff177a63438, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/51192d115ecf4f4a8eddf28df13d0bab] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=147.2 K 2024-11-20T17:26:15,471 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:15,471 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/f3c4d299b6364bf6a18275dc1eba9d80, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/34d0ccaead5341bd81daa00557488d39, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/b0e25c87eb14428091a5bff177a63438, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/51192d115ecf4f4a8eddf28df13d0bab] 2024-11-20T17:26:15,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,472 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3c4d299b6364bf6a18275dc1eba9d80, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123569868 2024-11-20T17:26:15,472 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46586 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:15,472 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/B is initiating minor compaction (all files) 2024-11-20T17:26:15,472 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/B in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:15,472 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/7c304f43e53a46f09cb8adcb1c88aad5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/6c17b76b756241be9bb142a1883f1f18, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/fbfd6f141cf9485cbe7b8b2e037ca723, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/2f54f91c87db49b38677203dc402c0f9] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=45.5 K 2024-11-20T17:26:15,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,473 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34d0ccaead5341bd81daa00557488d39, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732123572036 2024-11-20T17:26:15,473 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c304f43e53a46f09cb8adcb1c88aad5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123569868 2024-11-20T17:26:15,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,473 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0e25c87eb14428091a5bff177a63438, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732123573164 2024-11-20T17:26:15,473 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c17b76b756241be9bb142a1883f1f18, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732123572041 2024-11-20T17:26:15,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,473 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting fbfd6f141cf9485cbe7b8b2e037ca723, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732123573164 2024-11-20T17:26:15,473 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51192d115ecf4f4a8eddf28df13d0bab, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732123573867 2024-11-20T17:26:15,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,474 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f54f91c87db49b38677203dc402c0f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732123573871 2024-11-20T17:26:15,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,481 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:15,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,484 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#B#compaction#325 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:15,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,485 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112043d137d4f28f495885ea5cd141b28d5a_464447c196464f9bd851ab5282adadec store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:15,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,485 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/a41c89173aeb4a4cbe75c55b4274e818 is 50, key is test_row_0/B:col10/1732123574998/Put/seqid=0 2024-11-20T17:26:15,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,487 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112043d137d4f28f495885ea5cd141b28d5a_464447c196464f9bd851ab5282adadec, store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:15,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,487 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112043d137d4f28f495885ea5cd141b28d5a_464447c196464f9bd851ab5282adadec because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:15,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742211_1387 (size=4469) 2024-11-20T17:26:15,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T17:26:15,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,501 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#A#compaction#324 average throughput is 1.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:15,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,502 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/12730ecc800a428595e5aee3df803aed is 175, key is test_row_0/A:col10/1732123574998/Put/seqid=0 2024-11-20T17:26:15,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,503 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,507 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742212_1388 (size=12663) 2024-11-20T17:26:15,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,516 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/a41c89173aeb4a4cbe75c55b4274e818 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/a41c89173aeb4a4cbe75c55b4274e818 2024-11-20T17:26:15,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,521 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/B of 464447c196464f9bd851ab5282adadec into a41c89173aeb4a4cbe75c55b4274e818(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,521 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:15,521 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/B, priority=12, startTime=1732123575470; duration=0sec 2024-11-20T17:26:15,521 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:15,521 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:B 2024-11-20T17:26:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,521 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:15,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,523 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46586 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:15,523 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/C is initiating minor compaction (all files) 2024-11-20T17:26:15,523 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/C in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:15,523 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/a3d8ed6541174f4dbc50858ac7ab18db, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/c4ed123cac804c0cada63601576f1dca, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/4afcc638660a44adb3c1384efcfc6417, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fcfe20a664954197a31dee138b94963c] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=45.5 K 2024-11-20T17:26:15,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,523 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting a3d8ed6541174f4dbc50858ac7ab18db, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123569868 2024-11-20T17:26:15,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,524 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting c4ed123cac804c0cada63601576f1dca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732123572041 2024-11-20T17:26:15,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,524 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 4afcc638660a44adb3c1384efcfc6417, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732123573164 2024-11-20T17:26:15,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,525 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting fcfe20a664954197a31dee138b94963c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732123573871 2024-11-20T17:26:15,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742213_1389 (size=31617) 2024-11-20T17:26:15,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,534 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#C#compaction#326 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:15,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,535 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/ec563a66560341d8a11814f659410df8 is 50, key is test_row_0/C:col10/1732123574998/Put/seqid=0 2024-11-20T17:26:15,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,540 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/12730ecc800a428595e5aee3df803aed as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/12730ecc800a428595e5aee3df803aed 2024-11-20T17:26:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742214_1390 (size=12663) 2024-11-20T17:26:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,545 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/A of 464447c196464f9bd851ab5282adadec into 12730ecc800a428595e5aee3df803aed(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:15,545 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:15,545 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/A, priority=12, startTime=1732123575469; duration=0sec 2024-11-20T17:26:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,545 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:15,545 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:A 2024-11-20T17:26:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:15,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
2024-11-20T17:26:15,611 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293
2024-11-20T17:26:15,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105
2024-11-20T17:26:15,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.
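Around this point the port-44015 RPC handlers emit the StoreFileTrackerFactory(122) DEBUG message above once per request, which buries the events that actually advance the test. When reading a capture like this it can help to collapse consecutive duplicates first; a minimal Python sketch, assuming the capture sits in a local file (test.log is a hypothetical name) and that every entry begins with a timestamp in the format shown above (the regex is an assumption beyond what is visible here):

import re
import sys

# Entries look like "2024-11-20T17:26:15,587 LEVEL [thread {}] source(line): message" and often
# run together on one physical line, so split on the next timestamp rather than on newlines.
ENTRY = re.compile(
    r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2},\d{3})\s+"
    r"(?P<level>[A-Z]+)\s+\[(?P<thread>[^\]]*)\]\s+"
    r"(?P<source>\S+\(\d+\)):\s+(?P<msg>.*?)"
    r"(?=\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2},\d{3}|\Z)",
    re.S,
)

def collapse(path):
    text = open(path, encoding="utf-8").read()
    last_msg, count = None, 0
    for m in ENTRY.finditer(text):
        msg = " ".join(m.group("msg").split())   # re-join messages wrapped across lines
        if msg == last_msg:
            count += 1                           # identical to the previous message: only count it
            continue
        if count > 1:
            print(f"    ... previous message repeated {count} times")
        print(f"{m.group('ts')} {m.group('level')} [{m.group('thread')}] {m.group('source')}: {msg}")
        last_msg, count = msg, 1
    if count > 1:
        print(f"    ... previous message repeated {count} times")

if __name__ == "__main__":
    collapse(sys.argv[1] if len(sys.argv) > 1 else "test.log")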
2024-11-20T17:26:15,612 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB
2024-11-20T17:26:15,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A
2024-11-20T17:26:15,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:26:15,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B
2024-11-20T17:26:15,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:26:15,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C
2024-11-20T17:26:15,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T17:26:15,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209c9ddd99639d4d7e91922ddf204004bf_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123575004/Put/seqid=0
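The pid=105 flush above reports its sizes inline (dataSize=26.84 KB heapSize=71.06 KB, plus one FLUSHING TO DISK line per store A/B/C). If those figures are wanted across many flushes rather than read off by eye, a small sketch in the same spirit; the unit table only covers units seen in captures like this one and is otherwise an assumption:

import re

FLUSH = re.compile(
    r"regionserver\.HRegion\(\d+\): Flushing (?P<region>\w+) "
    r"(?P<stores>\d+)/(?P<total>\d+) column families, "
    r"dataSize=(?P<data>[\d.]+) (?P<dunit>\w+) heapSize=(?P<heap>[\d.]+) (?P<hunit>\w+)"
)

UNITS = {"B": 1, "KB": 1024, "MB": 1024 ** 2}  # assumed; only KB appears in this capture

def to_bytes(value, unit):
    return float(value) * UNITS[unit]

def flush_events(text):
    """Yield (region, stores flushed, dataSize in bytes, heapSize in bytes) per Flushing line."""
    for m in FLUSH.finditer(text):
        yield (m.group("region"),
               int(m.group("stores")),
               to_bytes(m.group("data"), m.group("dunit")),
               to_bytes(m.group("heap"), m.group("hunit")))

# Worked against the entry above: 26.84 KB of cell data held in roughly 71.06 KB of heap.
sample = ("regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec "
          "3/3 column families, dataSize=26.84 KB heapSize=71.06 KB")
for region, stores, data, heap in flush_events(sample):
    print(region, stores, round(data), round(heap))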
2024-11-20T17:26:15,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec
2024-11-20T17:26:15,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing
2024-11-20T17:26:15,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742215_1391 (size=9814)
2024-11-20T17:26:15,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:15,646 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209c9ddd99639d4d7e91922ddf204004bf_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209c9ddd99639d4d7e91922ddf204004bf_464447c196464f9bd851ab5282adadec
2024-11-20T17:26:15,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/2d38e3511d73466ba929713d80982ad4, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec]
2024-11-20T17:26:15,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/2d38e3511d73466ba929713d80982ad4 is 175, key is test_row_0/A:col10/1732123575004/Put/seqid=0
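The HMobStore line above shows the MOB side of this flush: the file written under mobdir/.tmp is renamed into mobdir/data/default/TestAcidGuarantees/.../A/ with its name unchanged, while the ordinary store file for family A lands under the region's .tmp directory. A quick sketch for sanity-checking that rename pattern across a whole capture; the "from X to Y" phrasing is taken from the line above, and the invariant being checked is inferred from this one example, so treat it as an assumption:

import posixpath
import re

RENAME = re.compile(r"FLUSH Renaming flushed file from (?P<src>\S+) to (?P<dst>\S+)")

def check_mob_renames(text):
    """Verify each MOB flush rename keeps the file name and moves mobdir/.tmp -> mobdir/data."""
    for m in RENAME.finditer(text):
        src, dst = m.group("src"), m.group("dst")
        same_name = posixpath.basename(src) == posixpath.basename(dst)
        into_data = "/mobdir/.tmp/" in src and "/mobdir/data/" in dst
        yield src, dst, same_name and into_data

sample = ("regionserver.HMobStore(268): FLUSH Renaming flushed file from "
          "hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/"
          "d41d8cd98f00b204e9800998ecf8427e202411209c9ddd99639d4d7e91922ddf204004bf_464447c196464f9bd851ab5282adadec to "
          "hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/"
          "TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/"
          "d41d8cd98f00b204e9800998ecf8427e202411209c9ddd99639d4d7e91922ddf204004bf_464447c196464f9bd851ab5282adadec")
for src, dst, ok in check_mob_renames(sample):
    print("OK" if ok else "UNEXPECTED", dst)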
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742216_1392 (size=22461) 2024-11-20T17:26:15,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T17:26:15,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T17:26:15,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T17:26:15,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T17:26:15,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T17:26:15,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T17:26:15,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T17:26:15,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T17:26:15,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:15,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123635717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123635717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123635718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123635719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123635720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123635825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123635825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123635826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123635826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:15,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123635826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:15,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T17:26:15,948 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/ec563a66560341d8a11814f659410df8 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/ec563a66560341d8a11814f659410df8 2024-11-20T17:26:15,952 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/C of 464447c196464f9bd851ab5282adadec into ec563a66560341d8a11814f659410df8(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:15,952 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:15,952 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/C, priority=12, startTime=1732123575470; duration=0sec 2024-11-20T17:26:15,952 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:15,952 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:C 2024-11-20T17:26:16,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123636029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123636030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123636030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123636030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123636030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,065 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/2d38e3511d73466ba929713d80982ad4 2024-11-20T17:26:16,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/2e8f09844f3d4aae8a56d8c1491f883f is 50, key is test_row_0/B:col10/1732123575004/Put/seqid=0 2024-11-20T17:26:16,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742217_1393 (size=9757) 2024-11-20T17:26:16,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123636336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123636336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123636337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123636337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123636338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T17:26:16,476 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/2e8f09844f3d4aae8a56d8c1491f883f 2024-11-20T17:26:16,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/4ec7145e8ac9479e83fa4c6c93e6b181 is 50, key is test_row_0/C:col10/1732123575004/Put/seqid=0 2024-11-20T17:26:16,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742218_1394 (size=9757) 2024-11-20T17:26:16,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123636840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123636840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123636841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123636841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:16,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123636845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:16,888 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/4ec7145e8ac9479e83fa4c6c93e6b181 2024-11-20T17:26:16,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/2d38e3511d73466ba929713d80982ad4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2d38e3511d73466ba929713d80982ad4 2024-11-20T17:26:16,896 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2d38e3511d73466ba929713d80982ad4, entries=100, sequenceid=250, filesize=21.9 K 2024-11-20T17:26:16,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/2e8f09844f3d4aae8a56d8c1491f883f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/2e8f09844f3d4aae8a56d8c1491f883f 2024-11-20T17:26:16,902 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/2e8f09844f3d4aae8a56d8c1491f883f, entries=100, sequenceid=250, filesize=9.5 K 2024-11-20T17:26:16,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/4ec7145e8ac9479e83fa4c6c93e6b181 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/4ec7145e8ac9479e83fa4c6c93e6b181 2024-11-20T17:26:16,907 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/4ec7145e8ac9479e83fa4c6c93e6b181, entries=100, sequenceid=250, filesize=9.5 K 2024-11-20T17:26:16,908 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 464447c196464f9bd851ab5282adadec in 1295ms, sequenceid=250, compaction requested=false 2024-11-20T17:26:16,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:16,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:16,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-20T17:26:16,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-20T17:26:16,910 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-20T17:26:16,910 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6020 sec 2024-11-20T17:26:16,911 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 1.6060 sec 2024-11-20T17:26:17,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T17:26:17,410 INFO [Thread-1492 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-20T17:26:17,411 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:17,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-20T17:26:17,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T17:26:17,412 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:17,413 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:17,413 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:17,490 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1afa8c2b7a58493d845ae91e6666b337, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/25891e63ca9f43468432918f6d6530b2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3b2f005803a447b6bc2a80b525307e7b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/0e2835adfc704198a348ce4f35bb6d95, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2eac3ef816594a859147e3c78333ba6c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3e73c14ea152485faa7aa07ad88b5a8c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/4e3a63ce8fb04b4b84285ad597192378, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/96376774f09d4d6495107ab04a0c608c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/ce95f502d1eb40869ed76cd9aecbc255, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1e185227ac70463e9e9366127b5ac9cb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/e72479e5ed404345baa91844fdb00d87, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/f3c4d299b6364bf6a18275dc1eba9d80, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/34d0ccaead5341bd81daa00557488d39, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/b0e25c87eb14428091a5bff177a63438, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/51192d115ecf4f4a8eddf28df13d0bab] to archive 2024-11-20T17:26:17,491 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:26:17,492 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1afa8c2b7a58493d845ae91e6666b337 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1afa8c2b7a58493d845ae91e6666b337 2024-11-20T17:26:17,493 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/25891e63ca9f43468432918f6d6530b2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/25891e63ca9f43468432918f6d6530b2 2024-11-20T17:26:17,495 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3b2f005803a447b6bc2a80b525307e7b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3b2f005803a447b6bc2a80b525307e7b 2024-11-20T17:26:17,496 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/0e2835adfc704198a348ce4f35bb6d95 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/0e2835adfc704198a348ce4f35bb6d95 2024-11-20T17:26:17,497 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2eac3ef816594a859147e3c78333ba6c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2eac3ef816594a859147e3c78333ba6c 2024-11-20T17:26:17,498 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3e73c14ea152485faa7aa07ad88b5a8c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/3e73c14ea152485faa7aa07ad88b5a8c 2024-11-20T17:26:17,499 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/4e3a63ce8fb04b4b84285ad597192378 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/4e3a63ce8fb04b4b84285ad597192378 2024-11-20T17:26:17,500 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/96376774f09d4d6495107ab04a0c608c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/96376774f09d4d6495107ab04a0c608c 2024-11-20T17:26:17,501 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/ce95f502d1eb40869ed76cd9aecbc255 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/ce95f502d1eb40869ed76cd9aecbc255 2024-11-20T17:26:17,502 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1e185227ac70463e9e9366127b5ac9cb to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1e185227ac70463e9e9366127b5ac9cb 2024-11-20T17:26:17,503 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/e72479e5ed404345baa91844fdb00d87 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/e72479e5ed404345baa91844fdb00d87 2024-11-20T17:26:17,504 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/f3c4d299b6364bf6a18275dc1eba9d80 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/f3c4d299b6364bf6a18275dc1eba9d80 2024-11-20T17:26:17,505 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/34d0ccaead5341bd81daa00557488d39 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/34d0ccaead5341bd81daa00557488d39 2024-11-20T17:26:17,506 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/b0e25c87eb14428091a5bff177a63438 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/b0e25c87eb14428091a5bff177a63438 2024-11-20T17:26:17,507 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/51192d115ecf4f4a8eddf28df13d0bab to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/51192d115ecf4f4a8eddf28df13d0bab 2024-11-20T17:26:17,509 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/4afa4bb516634f3f9be4a317e441ebfd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/653dd3c5018042ad89bab6fdd9df579b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/5a3bf1bc9c2e47518f02295ef8481704, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/137f4f340e194854a0b8f613bce1fcea, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/6ba8823798824fa1b89fee429b21fffa, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/f3aca11a37a14345b49a2624f196f6b4, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/83ad4bcafc1f48dbb264c461d4793f50, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/28bbc2a2f9234b73aa306bff4fafe594, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/8013bb69446441f6817773fd890d0717, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/57aa07db4ca048ceb9ad7f8a133cab63, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/7c304f43e53a46f09cb8adcb1c88aad5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/89762bab8e0443d8ad6301670262910a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/6c17b76b756241be9bb142a1883f1f18, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/fbfd6f141cf9485cbe7b8b2e037ca723, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/2f54f91c87db49b38677203dc402c0f9] to archive 2024-11-20T17:26:17,510 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:26:17,511 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/4afa4bb516634f3f9be4a317e441ebfd to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/4afa4bb516634f3f9be4a317e441ebfd 2024-11-20T17:26:17,512 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/653dd3c5018042ad89bab6fdd9df579b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/653dd3c5018042ad89bab6fdd9df579b 2024-11-20T17:26:17,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T17:26:17,513 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/5a3bf1bc9c2e47518f02295ef8481704 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/5a3bf1bc9c2e47518f02295ef8481704 2024-11-20T17:26:17,514 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/137f4f340e194854a0b8f613bce1fcea to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/137f4f340e194854a0b8f613bce1fcea 2024-11-20T17:26:17,515 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/6ba8823798824fa1b89fee429b21fffa to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/6ba8823798824fa1b89fee429b21fffa 2024-11-20T17:26:17,516 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/f3aca11a37a14345b49a2624f196f6b4 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/f3aca11a37a14345b49a2624f196f6b4 2024-11-20T17:26:17,517 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/83ad4bcafc1f48dbb264c461d4793f50 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/83ad4bcafc1f48dbb264c461d4793f50 2024-11-20T17:26:17,518 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/28bbc2a2f9234b73aa306bff4fafe594 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/28bbc2a2f9234b73aa306bff4fafe594 2024-11-20T17:26:17,519 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/8013bb69446441f6817773fd890d0717 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/8013bb69446441f6817773fd890d0717 2024-11-20T17:26:17,520 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/57aa07db4ca048ceb9ad7f8a133cab63 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/57aa07db4ca048ceb9ad7f8a133cab63 2024-11-20T17:26:17,521 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/7c304f43e53a46f09cb8adcb1c88aad5 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/7c304f43e53a46f09cb8adcb1c88aad5 2024-11-20T17:26:17,522 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/89762bab8e0443d8ad6301670262910a to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/89762bab8e0443d8ad6301670262910a 2024-11-20T17:26:17,523 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/6c17b76b756241be9bb142a1883f1f18 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/6c17b76b756241be9bb142a1883f1f18 2024-11-20T17:26:17,524 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/fbfd6f141cf9485cbe7b8b2e037ca723 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/fbfd6f141cf9485cbe7b8b2e037ca723 2024-11-20T17:26:17,525 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/2f54f91c87db49b38677203dc402c0f9 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/2f54f91c87db49b38677203dc402c0f9 2024-11-20T17:26:17,528 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/e4a3a4f75a20455980578a0e306205ba, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cb9b6108db804fa685035d6f4eb01da0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/c42f107ddeab49f8a25d99fb879130eb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3ec30834a9a24e86a1cf91f73318a083, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/2f9d6a5d96274f768fb26bbf23260946, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/8c4825d4324a4aad9b9124c78bbae2e8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fc27ad8e58df43b190e5dd4aa0ce43a3, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cf5e2b6004c348a3bf3570efb0895d1d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/347f30ace34146099199d171b3409319, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/09df6c4db9c24b5fad558621fe243911, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/a3d8ed6541174f4dbc50858ac7ab18db, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/94d5d9cc5d644646ab108cdfaf8bfe58, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/c4ed123cac804c0cada63601576f1dca, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/4afcc638660a44adb3c1384efcfc6417, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fcfe20a664954197a31dee138b94963c] to archive 2024-11-20T17:26:17,529 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:26:17,530 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/e4a3a4f75a20455980578a0e306205ba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/e4a3a4f75a20455980578a0e306205ba 2024-11-20T17:26:17,531 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cb9b6108db804fa685035d6f4eb01da0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cb9b6108db804fa685035d6f4eb01da0 2024-11-20T17:26:17,532 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/c42f107ddeab49f8a25d99fb879130eb to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/c42f107ddeab49f8a25d99fb879130eb 2024-11-20T17:26:17,533 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): 
Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3ec30834a9a24e86a1cf91f73318a083 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3ec30834a9a24e86a1cf91f73318a083 2024-11-20T17:26:17,534 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/2f9d6a5d96274f768fb26bbf23260946 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/2f9d6a5d96274f768fb26bbf23260946 2024-11-20T17:26:17,535 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/8c4825d4324a4aad9b9124c78bbae2e8 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/8c4825d4324a4aad9b9124c78bbae2e8 2024-11-20T17:26:17,536 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fc27ad8e58df43b190e5dd4aa0ce43a3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fc27ad8e58df43b190e5dd4aa0ce43a3 2024-11-20T17:26:17,537 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cf5e2b6004c348a3bf3570efb0895d1d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cf5e2b6004c348a3bf3570efb0895d1d 2024-11-20T17:26:17,538 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/347f30ace34146099199d171b3409319 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/347f30ace34146099199d171b3409319 2024-11-20T17:26:17,539 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/09df6c4db9c24b5fad558621fe243911 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/09df6c4db9c24b5fad558621fe243911 2024-11-20T17:26:17,540 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/a3d8ed6541174f4dbc50858ac7ab18db to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/a3d8ed6541174f4dbc50858ac7ab18db 2024-11-20T17:26:17,541 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/94d5d9cc5d644646ab108cdfaf8bfe58 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/94d5d9cc5d644646ab108cdfaf8bfe58 2024-11-20T17:26:17,542 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/c4ed123cac804c0cada63601576f1dca to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/c4ed123cac804c0cada63601576f1dca 2024-11-20T17:26:17,543 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/4afcc638660a44adb3c1384efcfc6417 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/4afcc638660a44adb3c1384efcfc6417 2024-11-20T17:26:17,544 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/d514dc944523:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fcfe20a664954197a31dee138b94963c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fcfe20a664954197a31dee138b94963c 2024-11-20T17:26:17,564 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:17,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-20T17:26:17,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:17,565 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T17:26:17,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:17,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:17,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:17,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:17,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:17,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:17,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112085a3cfbaa3ee417a8670fc9c65075136_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123575715/Put/seqid=0 2024-11-20T17:26:17,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742219_1395 (size=12454) 2024-11-20T17:26:17,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T17:26:17,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:17,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:17,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:17,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123637850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:17,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:17,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123637850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:17,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:17,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123637852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:17,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:17,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123637853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:17,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:17,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123637854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:17,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:17,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123637955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:17,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:17,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123637958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:17,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:17,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123637958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:17,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:17,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123637958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:17,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:17,980 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112085a3cfbaa3ee417a8670fc9c65075136_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112085a3cfbaa3ee417a8670fc9c65075136_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:17,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/cfa35323ceb14061b4a7a672e7e94cb5, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:17,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/cfa35323ceb14061b4a7a672e7e94cb5 is 175, key is test_row_0/A:col10/1732123575715/Put/seqid=0 2024-11-20T17:26:17,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742220_1396 (size=31255) 2024-11-20T17:26:17,986 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=280, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/cfa35323ceb14061b4a7a672e7e94cb5 2024-11-20T17:26:17,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/3ed660e04785416d950ef29837bdc1b2 is 50, key is test_row_0/B:col10/1732123575715/Put/seqid=0 2024-11-20T17:26:17,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742221_1397 (size=12301) 2024-11-20T17:26:18,000 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/3ed660e04785416d950ef29837bdc1b2 2024-11-20T17:26:18,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/cf2e24b8400d4e5792b3655c2c4f3c62 is 50, key is test_row_0/C:col10/1732123575715/Put/seqid=0 2024-11-20T17:26:18,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742222_1398 (size=12301) 2024-11-20T17:26:18,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T17:26:18,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123638158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123638161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123638162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123638162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,412 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/cf2e24b8400d4e5792b3655c2c4f3c62 2024-11-20T17:26:18,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/cfa35323ceb14061b4a7a672e7e94cb5 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/cfa35323ceb14061b4a7a672e7e94cb5 2024-11-20T17:26:18,419 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/cfa35323ceb14061b4a7a672e7e94cb5, entries=150, sequenceid=280, filesize=30.5 K 2024-11-20T17:26:18,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/3ed660e04785416d950ef29837bdc1b2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/3ed660e04785416d950ef29837bdc1b2 2024-11-20T17:26:18,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,423 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/3ed660e04785416d950ef29837bdc1b2, entries=150, sequenceid=280, filesize=12.0 K 2024-11-20T17:26:18,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/cf2e24b8400d4e5792b3655c2c4f3c62 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cf2e24b8400d4e5792b3655c2c4f3c62 2024-11-20T17:26:18,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T17:26:18,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,428 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cf2e24b8400d4e5792b3655c2c4f3c62, entries=150, sequenceid=280, filesize=12.0 K 2024-11-20T17:26:18,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,428 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 464447c196464f9bd851ab5282adadec in 863ms, sequenceid=280, compaction requested=true 2024-11-20T17:26:18,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:18,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:18,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107
2024-11-20T17:26:18,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=107
2024-11-20T17:26:18,432 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106
2024-11-20T17:26:18,432 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0170 sec
2024-11-20T17:26:18,433 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 1.0210 sec
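Note: the pid=106/pid=107 records above show a table flush completing as master procedures: FlushTableProcedure fans out a FlushRegionProcedure for region 464447c196464f9bd851ab5282adadec, and the region server reports ~174.43 KB flushed (178620 bytes / 1024) at sequenceid=280. A minimal client-side sketch that would produce this kind of procedure pair, assuming a reachable cluster and the standard Admin API (whether TestAcidGuarantees issues its flushes this way is not visible in this log):

// Sketch only. Requests a synchronous flush of all memstores of the table;
// in the HBase version producing this log, the flush runs as master procedures.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the flush procedures (like pid=106/107 above) finish.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}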
2024-11-20T17:26:18,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:18,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:26:18,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:18,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:18,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:18,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:18,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:18,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:18,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112026b77b0ddafc4fb1870338e735bbb22a_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123578498/Put/seqid=0 2024-11-20T17:26:18,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T17:26:18,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,516 INFO [Thread-1492 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-20T17:26:18,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,518 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-20T17:26:18,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742223_1399 (size=20074) 2024-11-20T17:26:18,520 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:18,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,520 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:18,520 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:18,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,521 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:26:18,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,526 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112026b77b0ddafc4fb1870338e735bbb22a_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112026b77b0ddafc4fb1870338e735bbb22a_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:18,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,527 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/7830dfb1ace447edb64afd7791e86dae, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:18,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/7830dfb1ace447edb64afd7791e86dae is 175, key is test_row_0/A:col10/1732123578498/Put/seqid=0 2024-11-20T17:26:18,528 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742224_1400 (size=57333) 2024-11-20T17:26:18,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,538 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/7830dfb1ace447edb64afd7791e86dae 2024-11-20T17:26:18,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/ad1ac912105141ec82842d9de6d378a0 is 50, key is test_row_0/B:col10/1732123578498/Put/seqid=0 2024-11-20T17:26:18,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742225_1401 (size=19621) 2024-11-20T17:26:18,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/ad1ac912105141ec82842d9de6d378a0 2024-11-20T17:26:18,571 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/3bfbf4fc36a54f39b5eb0a0015d0642a is 50, key is test_row_0/C:col10/1732123578498/Put/seqid=0 2024-11-20T17:26:18,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742226_1402 (size=12301) 2024-11-20T17:26:18,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/3bfbf4fc36a54f39b5eb0a0015d0642a 2024-11-20T17:26:18,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/7830dfb1ace447edb64afd7791e86dae as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/7830dfb1ace447edb64afd7791e86dae 2024-11-20T17:26:18,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/7830dfb1ace447edb64afd7791e86dae, entries=300, sequenceid=291, filesize=56.0 K 2024-11-20T17:26:18,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/ad1ac912105141ec82842d9de6d378a0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/ad1ac912105141ec82842d9de6d378a0 2024-11-20T17:26:18,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/ad1ac912105141ec82842d9de6d378a0, entries=300, sequenceid=291, filesize=19.2 K 2024-11-20T17:26:18,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/3bfbf4fc36a54f39b5eb0a0015d0642a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3bfbf4fc36a54f39b5eb0a0015d0642a 2024-11-20T17:26:18,596 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3bfbf4fc36a54f39b5eb0a0015d0642a, entries=150, sequenceid=291, filesize=12.0 K 2024-11-20T17:26:18,599 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=114.05 KB/116790 for 464447c196464f9bd851ab5282adadec in 100ms, sequenceid=291, compaction requested=true 2024-11-20T17:26:18,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:18,600 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:18,600 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:18,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:18,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:26:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:18,602 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54342 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:18,602 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/B is initiating minor compaction (all files) 2024-11-20T17:26:18,602 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/B in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:18,602 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142666 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:18,602 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/A is initiating minor compaction (all files) 2024-11-20T17:26:18,602 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/a41c89173aeb4a4cbe75c55b4274e818, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/2e8f09844f3d4aae8a56d8c1491f883f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/3ed660e04785416d950ef29837bdc1b2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/ad1ac912105141ec82842d9de6d378a0] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=53.1 K 2024-11-20T17:26:18,602 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/A in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:18,602 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/12730ecc800a428595e5aee3df803aed, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2d38e3511d73466ba929713d80982ad4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/cfa35323ceb14061b4a7a672e7e94cb5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/7830dfb1ace447edb64afd7791e86dae] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=139.3 K 2024-11-20T17:26:18,602 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:18,602 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/12730ecc800a428595e5aee3df803aed, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2d38e3511d73466ba929713d80982ad4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/cfa35323ceb14061b4a7a672e7e94cb5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/7830dfb1ace447edb64afd7791e86dae] 2024-11-20T17:26:18,602 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting a41c89173aeb4a4cbe75c55b4274e818, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732123573871 2024-11-20T17:26:18,603 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12730ecc800a428595e5aee3df803aed, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732123573871 2024-11-20T17:26:18,603 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e8f09844f3d4aae8a56d8c1491f883f, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732123575004 2024-11-20T17:26:18,603 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d38e3511d73466ba929713d80982ad4, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732123575004 2024-11-20T17:26:18,603 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 3ed660e04785416d950ef29837bdc1b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732123575715 2024-11-20T17:26:18,609 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfa35323ceb14061b4a7a672e7e94cb5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732123575715 2024-11-20T17:26:18,609 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting ad1ac912105141ec82842d9de6d378a0, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732123577851 2024-11-20T17:26:18,609 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7830dfb1ace447edb64afd7791e86dae, keycount=300, bloomtype=ROW, size=56.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732123577851 2024-11-20T17:26:18,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112074c3bb79ba044041ab320425d73a0ffe_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123578578/Put/seqid=0 2024-11-20T17:26:18,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:26:18,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742227_1403 (size=12454) 2024-11-20T17:26:18,632 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:18,635 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#B#compaction#337 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:18,635 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/41914f94ac374e7fa48e5192d1b247ce is 50, key is test_row_0/B:col10/1732123578498/Put/seqid=0 2024-11-20T17:26:18,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123638626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123638628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123638629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,641 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112074c3bb79ba044041ab320425d73a0ffe_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112074c3bb79ba044041ab320425d73a0ffe_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:18,641 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:18,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123638631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,648 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/70ae010f71b644fead7104144fb1dc3f, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:18,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/70ae010f71b644fead7104144fb1dc3f is 175, key is test_row_0/A:col10/1732123578578/Put/seqid=0 2024-11-20T17:26:18,657 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120b924fcd3468b4d4780d256756afe1c09_464447c196464f9bd851ab5282adadec store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:18,660 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120b924fcd3468b4d4780d256756afe1c09_464447c196464f9bd851ab5282adadec, store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:18,660 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b924fcd3468b4d4780d256756afe1c09_464447c196464f9bd851ab5282adadec because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:18,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742228_1404 (size=12439) 2024-11-20T17:26:18,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742229_1405 (size=31255) 2024-11-20T17:26:18,669 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=314, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/70ae010f71b644fead7104144fb1dc3f 
2024-11-20T17:26:18,672 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:18,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T17:26:18,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:18,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:18,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:18,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:18,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:18,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:18,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742230_1406 (size=4469) 2024-11-20T17:26:18,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/afaab08e3df343c8a86b932da8e88703 is 50, key is test_row_0/B:col10/1732123578578/Put/seqid=0 2024-11-20T17:26:18,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742231_1407 (size=12301) 2024-11-20T17:26:18,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123638738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123638742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123638742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123638748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:26:18,825 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:18,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T17:26:18,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:18,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:18,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:18,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:18,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:18,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:18,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123638945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123638948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123638949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123638955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:18,978 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:18,979 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T17:26:18,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:18,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:18,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:18,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:18,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:18,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:19,069 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/41914f94ac374e7fa48e5192d1b247ce as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/41914f94ac374e7fa48e5192d1b247ce 2024-11-20T17:26:19,073 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/B of 464447c196464f9bd851ab5282adadec into 41914f94ac374e7fa48e5192d1b247ce(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:19,073 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:19,073 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/B, priority=12, startTime=1732123578600; duration=0sec 2024-11-20T17:26:19,073 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:19,073 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:B 2024-11-20T17:26:19,073 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:19,075 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:19,075 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/C is initiating minor compaction (all files) 2024-11-20T17:26:19,075 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/C in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:19,075 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/ec563a66560341d8a11814f659410df8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/4ec7145e8ac9479e83fa4c6c93e6b181, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cf2e24b8400d4e5792b3655c2c4f3c62, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3bfbf4fc36a54f39b5eb0a0015d0642a] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=45.9 K 2024-11-20T17:26:19,075 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting ec563a66560341d8a11814f659410df8, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1732123573871 2024-11-20T17:26:19,075 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ec7145e8ac9479e83fa4c6c93e6b181, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732123575004 2024-11-20T17:26:19,076 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting cf2e24b8400d4e5792b3655c2c4f3c62, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732123575715 2024-11-20T17:26:19,076 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bfbf4fc36a54f39b5eb0a0015d0642a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732123577851 2024-11-20T17:26:19,082 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#A#compaction#338 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:19,082 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/1d7736ce54b841e59224292e66227e0f is 175, key is test_row_0/A:col10/1732123578498/Put/seqid=0 2024-11-20T17:26:19,083 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#C#compaction#340 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:19,084 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/fcdfa7c0695346429437c1a1e42600b6 is 50, key is test_row_0/C:col10/1732123578498/Put/seqid=0 2024-11-20T17:26:19,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742232_1408 (size=31393) 2024-11-20T17:26:19,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742233_1409 (size=12439) 2024-11-20T17:26:19,092 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/fcdfa7c0695346429437c1a1e42600b6 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fcdfa7c0695346429437c1a1e42600b6 2024-11-20T17:26:19,093 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/afaab08e3df343c8a86b932da8e88703 2024-11-20T17:26:19,097 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/C of 464447c196464f9bd851ab5282adadec into fcdfa7c0695346429437c1a1e42600b6(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:19,097 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:19,097 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/C, priority=12, startTime=1732123578600; duration=0sec 2024-11-20T17:26:19,097 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:19,097 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:C 2024-11-20T17:26:19,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/a04ee55b0dfe44e5b2088ce31ae59ee5 is 50, key is test_row_0/C:col10/1732123578578/Put/seqid=0 2024-11-20T17:26:19,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742234_1410 (size=12301) 2024-11-20T17:26:19,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:26:19,131 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:19,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T17:26:19,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:19,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:19,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:19,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:19,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:19,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123639248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:19,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123639256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:19,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123639256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:19,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123639261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:19,284 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:19,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T17:26:19,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:19,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:19,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:19,285 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
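The pid=109 entries above show the master repeatedly re-dispatching its flush procedure while the region server answers "NOT flushing ... as already flushing" and throws the IOException back; the master treats that as a retryable failure and dispatches again until the in-progress flush completes. For context, the same kind of flush can be requested from a client through the Admin API. A minimal sketch, assuming a reachable cluster and the standard HBase 2.x client; the table name is taken from the log, everything else is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; depending on the HBase
      // version this is driven either by direct region-server RPCs or by
      // master-side flush procedures like the pid=109 procedure above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}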
2024-11-20T17:26:19,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:19,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:19,437 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:19,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T17:26:19,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:19,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:19,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:19,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:19,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:19,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
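The "Over memstore limit=512.0 K" warnings above come from HRegion.checkResources(), which rejects writes with RegionTooBusyException while a region's memstore sits above its blocking limit. That limit is the configured flush size multiplied by the block multiplier. A minimal sketch of the arithmetic, assuming a 128 KB flush size and the default multiplier of 4 (hypothetical values, chosen only because they reproduce the 512 K figure seen in this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical settings; tests like this one shrink the flush size so that
    // flushes (and the blocking limit) are hit quickly under write load.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 131072 * 4 = 524288 bytes, i.e. the 512.0 K limit reported above.
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}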
2024-11-20T17:26:19,491 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/1d7736ce54b841e59224292e66227e0f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1d7736ce54b841e59224292e66227e0f 2024-11-20T17:26:19,495 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/A of 464447c196464f9bd851ab5282adadec into 1d7736ce54b841e59224292e66227e0f(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:19,495 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:19,495 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/A, priority=12, startTime=1732123578600; duration=0sec 2024-11-20T17:26:19,495 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:19,495 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:A 2024-11-20T17:26:19,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/a04ee55b0dfe44e5b2088ce31ae59ee5 2024-11-20T17:26:19,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/70ae010f71b644fead7104144fb1dc3f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/70ae010f71b644fead7104144fb1dc3f 2024-11-20T17:26:19,513 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/70ae010f71b644fead7104144fb1dc3f, entries=150, sequenceid=314, filesize=30.5 K 2024-11-20T17:26:19,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/afaab08e3df343c8a86b932da8e88703 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/afaab08e3df343c8a86b932da8e88703 2024-11-20T17:26:19,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/afaab08e3df343c8a86b932da8e88703, entries=150, sequenceid=314, filesize=12.0 K 2024-11-20T17:26:19,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/a04ee55b0dfe44e5b2088ce31ae59ee5 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/a04ee55b0dfe44e5b2088ce31ae59ee5 2024-11-20T17:26:19,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/a04ee55b0dfe44e5b2088ce31ae59ee5, entries=150, sequenceid=314, filesize=12.0 K 2024-11-20T17:26:19,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 464447c196464f9bd851ab5282adadec in 922ms, sequenceid=314, compaction requested=false 2024-11-20T17:26:19,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:19,589 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:19,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-20T17:26:19,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
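The RegionTooBusyException responses earlier in this run are transient back-pressure: once the flush above completes (~134.18 KB written at sequenceid=314), blocked writers can get through again. On the client side the rejections are absorbed by the retrying caller (an RpcRetryingCallerImpl entry further down reports tries=6 of retries=16). A minimal sketch of a write that leans on that retry budget, assuming the standard HBase 2.x Table API; the retry settings and cell value are illustrative, with row, family, and qualifier names taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative retry budget; RegionTooBusyException is retried by the client
    // until the put succeeds or the budget runs out.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L); // base pause in ms, backoff applies
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // blocks through busy-region retries rather than failing fast
    }
  }
}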
2024-11-20T17:26:19,590 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:26:19,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:19,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:19,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:19,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:19,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:19,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:19,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112084a215bcc8cc4ad8ade757341ed79deb_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123578629/Put/seqid=0 2024-11-20T17:26:19,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742235_1411 (size=12454) 2024-11-20T17:26:19,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:26:19,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:19,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. as already flushing 2024-11-20T17:26:19,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123639784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:19,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123639787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:19,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123639787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:19,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123639788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:19,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56346 deadline: 1732123639855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:19,858 DEBUG [Thread-1484 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., hostname=d514dc944523,44015,1732123455293, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:26:19,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123639890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:19,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123639893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:19,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123639893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:19,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:19,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123639894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:20,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:20,004 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112084a215bcc8cc4ad8ade757341ed79deb_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112084a215bcc8cc4ad8ade757341ed79deb_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:20,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/242ce37945744c49989dbfd5c932b0ad, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:20,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/242ce37945744c49989dbfd5c932b0ad is 175, key is test_row_0/A:col10/1732123578629/Put/seqid=0 2024-11-20T17:26:20,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742236_1412 (size=31255) 2024-11-20T17:26:20,011 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=22.4 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/242ce37945744c49989dbfd5c932b0ad 2024-11-20T17:26:20,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/3fad7a828c444a87b1273ef42fb36938 is 50, key is test_row_0/B:col10/1732123578629/Put/seqid=0 2024-11-20T17:26:20,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742237_1413 (size=12301) 2024-11-20T17:26:20,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:20,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123640094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:20,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:20,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123640097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:20,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:20,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123640099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:20,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:20,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123640099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:20,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:20,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56364 deadline: 1732123640401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:20,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:20,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56394 deadline: 1732123640401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:20,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:20,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56372 deadline: 1732123640403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:20,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:20,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56358 deadline: 1732123640403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:20,433 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/3fad7a828c444a87b1273ef42fb36938 2024-11-20T17:26:20,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/3f8606f2336a471692c50874cd0fc10a is 50, key is test_row_0/C:col10/1732123578629/Put/seqid=0 2024-11-20T17:26:20,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742238_1414 (size=12301) 2024-11-20T17:26:20,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:26:20,844 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/3f8606f2336a471692c50874cd0fc10a 2024-11-20T17:26:20,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/242ce37945744c49989dbfd5c932b0ad as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/242ce37945744c49989dbfd5c932b0ad 2024-11-20T17:26:20,852 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/242ce37945744c49989dbfd5c932b0ad, entries=150, sequenceid=330, filesize=30.5 K 2024-11-20T17:26:20,853 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/3fad7a828c444a87b1273ef42fb36938 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/3fad7a828c444a87b1273ef42fb36938 2024-11-20T17:26:20,856 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/3fad7a828c444a87b1273ef42fb36938, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T17:26:20,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/3f8606f2336a471692c50874cd0fc10a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3f8606f2336a471692c50874cd0fc10a 2024-11-20T17:26:20,860 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3f8606f2336a471692c50874cd0fc10a, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T17:26:20,860 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 464447c196464f9bd851ab5282adadec in 1270ms, sequenceid=330, compaction requested=true 2024-11-20T17:26:20,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:20,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
2024-11-20T17:26:20,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-20T17:26:20,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-20T17:26:20,862 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-20T17:26:20,863 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3420 sec 2024-11-20T17:26:20,864 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.3450 sec 2024-11-20T17:26:20,873 DEBUG [Thread-1497 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x28dc77ab to 127.0.0.1:56028 2024-11-20T17:26:20,873 DEBUG [Thread-1497 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:20,874 DEBUG [Thread-1495 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x45ad0ff5 to 127.0.0.1:56028 2024-11-20T17:26:20,874 DEBUG [Thread-1495 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:20,877 DEBUG [Thread-1493 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6b660061 to 127.0.0.1:56028 2024-11-20T17:26:20,877 DEBUG [Thread-1493 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:20,877 DEBUG [Thread-1499 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70304ef6 to 127.0.0.1:56028 2024-11-20T17:26:20,877 DEBUG [Thread-1499 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:20,879 DEBUG [Thread-1501 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f8ea360 to 127.0.0.1:56028 2024-11-20T17:26:20,879 DEBUG [Thread-1501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:20,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:20,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:26:20,908 DEBUG [Thread-1488 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x15bd9063 to 127.0.0.1:56028 2024-11-20T17:26:20,908 DEBUG [Thread-1488 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:20,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:20,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:20,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:20,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:20,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:20,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T17:26:20,910 DEBUG [Thread-1486 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x77b5b03d to 127.0.0.1:56028 2024-11-20T17:26:20,910 DEBUG [Thread-1486 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:20,910 DEBUG [Thread-1490 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c0ec341 to 127.0.0.1:56028 2024-11-20T17:26:20,910 DEBUG [Thread-1490 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:20,910 DEBUG [Thread-1482 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1157d18a to 127.0.0.1:56028 2024-11-20T17:26:20,910 DEBUG [Thread-1482 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:20,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112064a415429cd24533acd92c2c1d460419_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123579787/Put/seqid=0 2024-11-20T17:26:20,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742239_1415 (size=12454) 2024-11-20T17:26:21,318 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:21,322 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112064a415429cd24533acd92c2c1d460419_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112064a415429cd24533acd92c2c1d460419_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:21,322 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/fcefe579703b4ecbace8a2f11dce6dc9, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:21,323 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/fcefe579703b4ecbace8a2f11dce6dc9 is 175, key is test_row_0/A:col10/1732123579787/Put/seqid=0 2024-11-20T17:26:21,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742240_1416 (size=31255) 2024-11-20T17:26:21,727 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=354, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/fcefe579703b4ecbace8a2f11dce6dc9 2024-11-20T17:26:21,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/86946b2637d14c4585f72ffdbbed921b is 50, key is test_row_0/B:col10/1732123579787/Put/seqid=0 2024-11-20T17:26:21,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742241_1417 (size=12301) 2024-11-20T17:26:22,137 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/86946b2637d14c4585f72ffdbbed921b 2024-11-20T17:26:22,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/72f6b379b3974a9c94441946b04881b7 is 50, key is test_row_0/C:col10/1732123579787/Put/seqid=0 2024-11-20T17:26:22,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742242_1418 (size=12301) 2024-11-20T17:26:22,546 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/72f6b379b3974a9c94441946b04881b7 2024-11-20T17:26:22,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/fcefe579703b4ecbace8a2f11dce6dc9 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/fcefe579703b4ecbace8a2f11dce6dc9 2024-11-20T17:26:22,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/fcefe579703b4ecbace8a2f11dce6dc9, entries=150, sequenceid=354, filesize=30.5 K 2024-11-20T17:26:22,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/86946b2637d14c4585f72ffdbbed921b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/86946b2637d14c4585f72ffdbbed921b 2024-11-20T17:26:22,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/86946b2637d14c4585f72ffdbbed921b, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T17:26:22,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/72f6b379b3974a9c94441946b04881b7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/72f6b379b3974a9c94441946b04881b7 2024-11-20T17:26:22,558 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/72f6b379b3974a9c94441946b04881b7, entries=150, sequenceid=354, filesize=12.0 K 2024-11-20T17:26:22,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=20.13 KB/20610 for 464447c196464f9bd851ab5282adadec in 1651ms, sequenceid=354, compaction requested=true 2024-11-20T17:26:22,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:22,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:22,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:22,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:22,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:22,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 464447c196464f9bd851ab5282adadec:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:22,559 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:22,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:22,559 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:22,560 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:22,560 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49342 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:22,560 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/B is initiating minor compaction (all files) 2024-11-20T17:26:22,560 DEBUG 
[RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/A is initiating minor compaction (all files) 2024-11-20T17:26:22,560 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/B in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:22,560 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/A in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:22,560 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/41914f94ac374e7fa48e5192d1b247ce, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/afaab08e3df343c8a86b932da8e88703, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/3fad7a828c444a87b1273ef42fb36938, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/86946b2637d14c4585f72ffdbbed921b] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=48.2 K 2024-11-20T17:26:22,560 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1d7736ce54b841e59224292e66227e0f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/70ae010f71b644fead7104144fb1dc3f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/242ce37945744c49989dbfd5c932b0ad, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/fcefe579703b4ecbace8a2f11dce6dc9] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=122.2 K 2024-11-20T17:26:22,560 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:22,560 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1d7736ce54b841e59224292e66227e0f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/70ae010f71b644fead7104144fb1dc3f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/242ce37945744c49989dbfd5c932b0ad, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/fcefe579703b4ecbace8a2f11dce6dc9] 2024-11-20T17:26:22,560 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 41914f94ac374e7fa48e5192d1b247ce, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732123577851 2024-11-20T17:26:22,560 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d7736ce54b841e59224292e66227e0f, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732123577851 2024-11-20T17:26:22,560 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting afaab08e3df343c8a86b932da8e88703, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732123578558 2024-11-20T17:26:22,560 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70ae010f71b644fead7104144fb1dc3f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732123578558 2024-11-20T17:26:22,561 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fad7a828c444a87b1273ef42fb36938, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732123578613 2024-11-20T17:26:22,561 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 242ce37945744c49989dbfd5c932b0ad, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732123578613 2024-11-20T17:26:22,561 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 86946b2637d14c4585f72ffdbbed921b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732123579786 2024-11-20T17:26:22,561 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting fcefe579703b4ecbace8a2f11dce6dc9, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732123579786 2024-11-20T17:26:22,567 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:22,568 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120ae709dd0ceea4e6b9b8106a09fa0f549_464447c196464f9bd851ab5282adadec store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:22,569 INFO 
[RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#B#compaction#348 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:22,569 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/ff10ea6c78974a16988db03ce9f8f1f4 is 50, key is test_row_0/B:col10/1732123579787/Put/seqid=0 2024-11-20T17:26:22,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742243_1419 (size=12575) 2024-11-20T17:26:22,585 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120ae709dd0ceea4e6b9b8106a09fa0f549_464447c196464f9bd851ab5282adadec, store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:22,585 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ae709dd0ceea4e6b9b8106a09fa0f549_464447c196464f9bd851ab5282adadec because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:22,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742244_1420 (size=4469) 2024-11-20T17:26:22,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T17:26:22,626 INFO [Thread-1492 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-20T17:26:22,977 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/ff10ea6c78974a16988db03ce9f8f1f4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/ff10ea6c78974a16988db03ce9f8f1f4 2024-11-20T17:26:22,980 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/B of 464447c196464f9bd851ab5282adadec into ff10ea6c78974a16988db03ce9f8f1f4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:22,980 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:22,980 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/B, priority=12, startTime=1732123582559; duration=0sec 2024-11-20T17:26:22,980 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:22,980 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:B 2024-11-20T17:26:22,980 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:22,981 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49342 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:22,981 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 464447c196464f9bd851ab5282adadec/C is initiating minor compaction (all files) 2024-11-20T17:26:22,981 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 464447c196464f9bd851ab5282adadec/C in TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:22,981 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fcdfa7c0695346429437c1a1e42600b6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/a04ee55b0dfe44e5b2088ce31ae59ee5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3f8606f2336a471692c50874cd0fc10a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/72f6b379b3974a9c94441946b04881b7] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp, totalSize=48.2 K 2024-11-20T17:26:22,981 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting fcdfa7c0695346429437c1a1e42600b6, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732123577851 2024-11-20T17:26:22,982 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting a04ee55b0dfe44e5b2088ce31ae59ee5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732123578558 2024-11-20T17:26:22,982 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f8606f2336a471692c50874cd0fc10a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=330, earliestPutTs=1732123578613 2024-11-20T17:26:22,982 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 72f6b379b3974a9c94441946b04881b7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1732123579786 2024-11-20T17:26:22,988 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#C#compaction#350 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:22,988 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/63de9511a3a048d39aa1f302df0f520e is 50, key is test_row_0/C:col10/1732123579787/Put/seqid=0 2024-11-20T17:26:22,990 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 464447c196464f9bd851ab5282adadec#A#compaction#349 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:22,991 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/1f4932c7d7ae4e3cabc3add936d94138 is 175, key is test_row_0/A:col10/1732123579787/Put/seqid=0 2024-11-20T17:26:22,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742245_1421 (size=12575) 2024-11-20T17:26:22,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742246_1422 (size=31529) 2024-11-20T17:26:23,396 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/63de9511a3a048d39aa1f302df0f520e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/63de9511a3a048d39aa1f302df0f520e 2024-11-20T17:26:23,397 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/1f4932c7d7ae4e3cabc3add936d94138 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1f4932c7d7ae4e3cabc3add936d94138 2024-11-20T17:26:23,400 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/C of 464447c196464f9bd851ab5282adadec into 63de9511a3a048d39aa1f302df0f520e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:23,400 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:23,400 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/C, priority=12, startTime=1732123582559; duration=0sec 2024-11-20T17:26:23,400 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:23,400 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:C 2024-11-20T17:26:23,400 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 464447c196464f9bd851ab5282adadec/A of 464447c196464f9bd851ab5282adadec into 1f4932c7d7ae4e3cabc3add936d94138(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:23,400 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:23,400 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec., storeName=464447c196464f9bd851ab5282adadec/A, priority=12, startTime=1732123582559; duration=0sec 2024-11-20T17:26:23,401 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:23,401 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 464447c196464f9bd851ab5282adadec:A 2024-11-20T17:26:23,861 DEBUG [Thread-1484 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x353bcb3d to 127.0.0.1:56028 2024-11-20T17:26:23,861 DEBUG [Thread-1484 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2728 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8181 rows 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2747 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8239 rows 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2740 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8215 rows 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2755 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8261 rows 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2748 2024-11-20T17:26:23,861 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8242 rows 2024-11-20T17:26:23,861 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:26:23,861 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4dacfd49 to 127.0.0.1:56028 2024-11-20T17:26:23,861 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:23,863 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T17:26:23,863 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T17:26:23,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:23,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T17:26:23,867 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123583867"}]},"ts":"1732123583867"} 2024-11-20T17:26:23,868 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T17:26:23,871 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T17:26:23,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:26:23,872 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=464447c196464f9bd851ab5282adadec, UNASSIGN}] 2024-11-20T17:26:23,873 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=464447c196464f9bd851ab5282adadec, UNASSIGN 2024-11-20T17:26:23,873 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=464447c196464f9bd851ab5282adadec, regionState=CLOSING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:26:23,874 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:26:23,874 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; CloseRegionProcedure 464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:26:23,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T17:26:24,025 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:24,025 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(124): Close 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:24,026 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:26:24,026 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1681): Closing 464447c196464f9bd851ab5282adadec, disabling compactions & flushes 2024-11-20T17:26:24,026 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:24,026 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:24,026 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. after waiting 0 ms 2024-11-20T17:26:24,026 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 
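The `disable TestAcidGuarantees` request logged above (HMaster$13, pid=110) and the CloseTableRegionsProcedure / TransitRegionStateProcedure / CloseRegionProcedure chain it spawns are the master-side counterpart of a single blocking `Admin.disableTable` call on the client. A minimal client-side sketch, assuming a reachable cluster whose `hbase-site.xml` is on the classpath (the connection setup is an assumption; only the table name and the disable request itself come from this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        // Blocks until the master's DisableTableProcedure and its region-close
        // subprocedures finish; while waiting, the client keeps polling the master,
        // which is what the repeated "Checking to see if procedure is done pid=110"
        // entries in this log reflect.
        admin.disableTable(table);
      }
    }
  }
}
```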
2024-11-20T17:26:24,026 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2837): Flushing 464447c196464f9bd851ab5282adadec 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T17:26:24,026 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=A 2024-11-20T17:26:24,026 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:24,026 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=B 2024-11-20T17:26:24,026 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:24,026 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 464447c196464f9bd851ab5282adadec, store=C 2024-11-20T17:26:24,026 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:24,031 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112088d65daeaebc4b58a2053f09803c082a_464447c196464f9bd851ab5282adadec is 50, key is test_row_0/A:col10/1732123580908/Put/seqid=0 2024-11-20T17:26:24,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742247_1423 (size=9914) 2024-11-20T17:26:24,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T17:26:24,435 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:24,438 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112088d65daeaebc4b58a2053f09803c082a_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112088d65daeaebc4b58a2053f09803c082a_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:24,439 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/282a1dce83954debad950878b353cd83, store: [table=TestAcidGuarantees family=A region=464447c196464f9bd851ab5282adadec] 2024-11-20T17:26:24,439 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/282a1dce83954debad950878b353cd83 is 175, key is test_row_0/A:col10/1732123580908/Put/seqid=0 2024-11-20T17:26:24,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742248_1424 (size=22561) 2024-11-20T17:26:24,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T17:26:24,843 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/282a1dce83954debad950878b353cd83 2024-11-20T17:26:24,849 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/31e5507f70e54d55a0670f1f3114bf51 is 50, key is test_row_0/B:col10/1732123580908/Put/seqid=0 2024-11-20T17:26:24,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742249_1425 (size=9857) 2024-11-20T17:26:24,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T17:26:25,253 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/31e5507f70e54d55a0670f1f3114bf51 2024-11-20T17:26:25,259 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/b8040a9c3b864f83a5b8296efd08edc2 is 50, key is test_row_0/C:col10/1732123580908/Put/seqid=0 2024-11-20T17:26:25,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742250_1426 (size=9857) 2024-11-20T17:26:25,662 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=364 (bloomFilter=true), 
to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/b8040a9c3b864f83a5b8296efd08edc2 2024-11-20T17:26:25,666 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/A/282a1dce83954debad950878b353cd83 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/282a1dce83954debad950878b353cd83 2024-11-20T17:26:25,669 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/282a1dce83954debad950878b353cd83, entries=100, sequenceid=364, filesize=22.0 K 2024-11-20T17:26:25,670 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/B/31e5507f70e54d55a0670f1f3114bf51 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/31e5507f70e54d55a0670f1f3114bf51 2024-11-20T17:26:25,673 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/31e5507f70e54d55a0670f1f3114bf51, entries=100, sequenceid=364, filesize=9.6 K 2024-11-20T17:26:25,674 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/.tmp/C/b8040a9c3b864f83a5b8296efd08edc2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/b8040a9c3b864f83a5b8296efd08edc2 2024-11-20T17:26:25,676 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/b8040a9c3b864f83a5b8296efd08edc2, entries=100, sequenceid=364, filesize=9.6 K 2024-11-20T17:26:25,677 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 464447c196464f9bd851ab5282adadec in 1651ms, sequenceid=364, compaction requested=false 2024-11-20T17:26:25,677 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/12730ecc800a428595e5aee3df803aed, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2d38e3511d73466ba929713d80982ad4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/cfa35323ceb14061b4a7a672e7e94cb5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/7830dfb1ace447edb64afd7791e86dae, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1d7736ce54b841e59224292e66227e0f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/70ae010f71b644fead7104144fb1dc3f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/242ce37945744c49989dbfd5c932b0ad, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/fcefe579703b4ecbace8a2f11dce6dc9] to archive 2024-11-20T17:26:25,678 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:26:25,679 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/12730ecc800a428595e5aee3df803aed to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/12730ecc800a428595e5aee3df803aed 2024-11-20T17:26:25,680 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2d38e3511d73466ba929713d80982ad4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/2d38e3511d73466ba929713d80982ad4 2024-11-20T17:26:25,681 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/cfa35323ceb14061b4a7a672e7e94cb5 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/cfa35323ceb14061b4a7a672e7e94cb5 2024-11-20T17:26:25,682 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/7830dfb1ace447edb64afd7791e86dae to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/7830dfb1ace447edb64afd7791e86dae 2024-11-20T17:26:25,682 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1d7736ce54b841e59224292e66227e0f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1d7736ce54b841e59224292e66227e0f 2024-11-20T17:26:25,683 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/70ae010f71b644fead7104144fb1dc3f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/70ae010f71b644fead7104144fb1dc3f 2024-11-20T17:26:25,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/242ce37945744c49989dbfd5c932b0ad to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/242ce37945744c49989dbfd5c932b0ad 2024-11-20T17:26:25,685 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/fcefe579703b4ecbace8a2f11dce6dc9 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/fcefe579703b4ecbace8a2f11dce6dc9 2024-11-20T17:26:25,686 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/a41c89173aeb4a4cbe75c55b4274e818, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/2e8f09844f3d4aae8a56d8c1491f883f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/3ed660e04785416d950ef29837bdc1b2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/ad1ac912105141ec82842d9de6d378a0, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/41914f94ac374e7fa48e5192d1b247ce, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/afaab08e3df343c8a86b932da8e88703, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/3fad7a828c444a87b1273ef42fb36938, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/86946b2637d14c4585f72ffdbbed921b] to archive 2024-11-20T17:26:25,686 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:26:25,687 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/a41c89173aeb4a4cbe75c55b4274e818 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/a41c89173aeb4a4cbe75c55b4274e818 2024-11-20T17:26:25,688 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/2e8f09844f3d4aae8a56d8c1491f883f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/2e8f09844f3d4aae8a56d8c1491f883f 2024-11-20T17:26:25,689 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/3ed660e04785416d950ef29837bdc1b2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/3ed660e04785416d950ef29837bdc1b2 2024-11-20T17:26:25,690 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/ad1ac912105141ec82842d9de6d378a0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/ad1ac912105141ec82842d9de6d378a0 2024-11-20T17:26:25,690 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/41914f94ac374e7fa48e5192d1b247ce to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/41914f94ac374e7fa48e5192d1b247ce 2024-11-20T17:26:25,691 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/afaab08e3df343c8a86b932da8e88703 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/afaab08e3df343c8a86b932da8e88703 2024-11-20T17:26:25,692 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/3fad7a828c444a87b1273ef42fb36938 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/3fad7a828c444a87b1273ef42fb36938 2024-11-20T17:26:25,693 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/86946b2637d14c4585f72ffdbbed921b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/86946b2637d14c4585f72ffdbbed921b 2024-11-20T17:26:25,694 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/ec563a66560341d8a11814f659410df8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/4ec7145e8ac9479e83fa4c6c93e6b181, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cf2e24b8400d4e5792b3655c2c4f3c62, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fcdfa7c0695346429437c1a1e42600b6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3bfbf4fc36a54f39b5eb0a0015d0642a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/a04ee55b0dfe44e5b2088ce31ae59ee5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3f8606f2336a471692c50874cd0fc10a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/72f6b379b3974a9c94441946b04881b7] to archive 2024-11-20T17:26:25,694 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:26:25,695 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/ec563a66560341d8a11814f659410df8 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/ec563a66560341d8a11814f659410df8 2024-11-20T17:26:25,696 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/4ec7145e8ac9479e83fa4c6c93e6b181 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/4ec7145e8ac9479e83fa4c6c93e6b181 2024-11-20T17:26:25,697 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cf2e24b8400d4e5792b3655c2c4f3c62 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/cf2e24b8400d4e5792b3655c2c4f3c62 2024-11-20T17:26:25,697 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fcdfa7c0695346429437c1a1e42600b6 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/fcdfa7c0695346429437c1a1e42600b6 2024-11-20T17:26:25,698 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3bfbf4fc36a54f39b5eb0a0015d0642a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3bfbf4fc36a54f39b5eb0a0015d0642a 2024-11-20T17:26:25,699 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/a04ee55b0dfe44e5b2088ce31ae59ee5 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/a04ee55b0dfe44e5b2088ce31ae59ee5 2024-11-20T17:26:25,700 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3f8606f2336a471692c50874cd0fc10a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/3f8606f2336a471692c50874cd0fc10a 2024-11-20T17:26:25,701 DEBUG [StoreCloser-TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/72f6b379b3974a9c94441946b04881b7 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/72f6b379b3974a9c94441946b04881b7 2024-11-20T17:26:25,704 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/recovered.edits/367.seqid, newMaxSeqId=367, maxSeqId=4 2024-11-20T17:26:25,704 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec. 2024-11-20T17:26:25,704 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1635): Region close journal for 464447c196464f9bd851ab5282adadec: 2024-11-20T17:26:25,705 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(170): Closed 464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,706 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=464447c196464f9bd851ab5282adadec, regionState=CLOSED 2024-11-20T17:26:25,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-20T17:26:25,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; CloseRegionProcedure 464447c196464f9bd851ab5282adadec, server=d514dc944523,44015,1732123455293 in 1.8320 sec 2024-11-20T17:26:25,708 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=111 2024-11-20T17:26:25,708 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=111, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=464447c196464f9bd851ab5282adadec, UNASSIGN in 1.8350 sec 2024-11-20T17:26:25,709 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-20T17:26:25,709 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8380 sec 2024-11-20T17:26:25,710 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123585710"}]},"ts":"1732123585710"} 
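The `mobdir` paths and `DefaultMobStoreFlusher` entries in the flush above show that column family A is MOB-enabled in this `testMobScanAtomicity` run: its values are written out as MOB files under `mobdir/data/...` while the committed store file keeps reference cells pointing at them. A hedged sketch of how such a family could be declared through the client API (the threshold value is an illustrative assumption, not read from this log):

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  // Declares a column family whose values above the threshold are written as
  // MOB files (the files under mobdir/ in this log) rather than inline in
  // regular HFiles.
  static ColumnFamilyDescriptor mobFamilyA() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)    // route flushes through the MOB store flusher
        .setMobThreshold(4L)    // assumed threshold: values of 4 bytes or more become MOB cells
        .setMaxVersions(1)
        .build();
  }
}
```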
2024-11-20T17:26:25,711 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T17:26:25,713 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T17:26:25,714 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8500 sec 2024-11-20T17:26:25,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T17:26:25,970 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-20T17:26:25,970 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T17:26:25,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:25,971 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=114, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:25,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T17:26:25,972 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=114, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:25,974 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,976 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/recovered.edits] 2024-11-20T17:26:25,978 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1f4932c7d7ae4e3cabc3add936d94138 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/1f4932c7d7ae4e3cabc3add936d94138 2024-11-20T17:26:25,979 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/282a1dce83954debad950878b353cd83 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/A/282a1dce83954debad950878b353cd83 2024-11-20T17:26:25,980 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/31e5507f70e54d55a0670f1f3114bf51 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/31e5507f70e54d55a0670f1f3114bf51 2024-11-20T17:26:25,981 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/ff10ea6c78974a16988db03ce9f8f1f4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/B/ff10ea6c78974a16988db03ce9f8f1f4 2024-11-20T17:26:25,983 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/63de9511a3a048d39aa1f302df0f520e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/63de9511a3a048d39aa1f302df0f520e 2024-11-20T17:26:25,984 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/b8040a9c3b864f83a5b8296efd08edc2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/C/b8040a9c3b864f83a5b8296efd08edc2 2024-11-20T17:26:25,986 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/recovered.edits/367.seqid to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec/recovered.edits/367.seqid 2024-11-20T17:26:25,986 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,987 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T17:26:25,987 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T17:26:25,988 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T17:26:25,990 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120228041722efa483bb9f0d5c6a2209846_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120228041722efa483bb9f0d5c6a2209846_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,991 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112026b77b0ddafc4fb1870338e735bbb22a_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112026b77b0ddafc4fb1870338e735bbb22a_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,992 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203e84ce0d96814594a1777bb68b254f4d_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203e84ce0d96814594a1777bb68b254f4d_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,992 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112056e04aaa2d7e44549491177a0b7b0e3e_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112056e04aaa2d7e44549491177a0b7b0e3e_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,993 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112064a415429cd24533acd92c2c1d460419_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112064a415429cd24533acd92c2c1d460419_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,994 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112066ca4fc33fe5403f9ab7ec3a54773f27_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112066ca4fc33fe5403f9ab7ec3a54773f27_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,995 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112068299829fdbc46daa531938666f5e3e3_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112068299829fdbc46daa531938666f5e3e3_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,996 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112074c3bb79ba044041ab320425d73a0ffe_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112074c3bb79ba044041ab320425d73a0ffe_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,997 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207eb04df96d4d4b6e8c47d07c4271aef6_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207eb04df96d4d4b6e8c47d07c4271aef6_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,998 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112084a215bcc8cc4ad8ade757341ed79deb_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112084a215bcc8cc4ad8ade757341ed79deb_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,999 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112085a3cfbaa3ee417a8670fc9c65075136_464447c196464f9bd851ab5282adadec to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112085a3cfbaa3ee417a8670fc9c65075136_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:25,999 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112085c8150f91624837bf542acae4841b72_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112085c8150f91624837bf542acae4841b72_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:26,000 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112088d65daeaebc4b58a2053f09803c082a_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112088d65daeaebc4b58a2053f09803c082a_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:26,001 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209ac201faa9254e71bc063ad2d16ea280_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209ac201faa9254e71bc063ad2d16ea280_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:26,002 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209c9ddd99639d4d7e91922ddf204004bf_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209c9ddd99639d4d7e91922ddf204004bf_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:26,003 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209d06091883214ec5919fac33a1fd8d9b_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209d06091883214ec5919fac33a1fd8d9b_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:26,004 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a19a632a66e34e8db315e5c9ecf2624c_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a19a632a66e34e8db315e5c9ecf2624c_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:26,004 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ae862555a0984231aca385f711248a9a_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ae862555a0984231aca385f711248a9a_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:26,005 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e47764e19bbb40afbfbc22dfa22d5b92_464447c196464f9bd851ab5282adadec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e47764e19bbb40afbfbc22dfa22d5b92_464447c196464f9bd851ab5282adadec 2024-11-20T17:26:26,005 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T17:26:26,007 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=114, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:26,008 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T17:26:26,010 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T17:26:26,011 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=114, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:26,011 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
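Everything from the HFileArchiver entries onward is the DeleteTableProcedure (pid=114) archiving the remaining store files and MOB files before the table is scrubbed from `hbase:meta`; it was kicked off by the client's delete request at 17:26:25,970. A minimal sketch of issuing that request (same assumed connection setup as in the earlier disable sketch; the table must already be disabled):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.client.Admin;

public class DeleteTableSketch {
  // Drops the test table. deleteTable throws TableNotDisabledException if the
  // table has not been disabled first (pid=110 above).
  static void dropTable(Admin admin) throws java.io.IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    if (admin.tableExists(table)) {
      admin.deleteTable(table);   // blocks until the DeleteTableProcedure completes
    }
  }
}
```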
2024-11-20T17:26:26,011 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732123586011"}]},"ts":"9223372036854775807"} 2024-11-20T17:26:26,012 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T17:26:26,012 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 464447c196464f9bd851ab5282adadec, NAME => 'TestAcidGuarantees,,1732123557816.464447c196464f9bd851ab5282adadec.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T17:26:26,012 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T17:26:26,012 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732123586012"}]},"ts":"9223372036854775807"} 2024-11-20T17:26:26,014 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T17:26:26,016 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=114, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:26,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 46 msec 2024-11-20T17:26:26,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T17:26:26,073 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-20T17:26:26,082 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=242 (was 238) - Thread LEAK? -, OpenFileDescriptor=461 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=289 (was 226) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6128 (was 6174) 2024-11-20T17:26:26,090 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=242, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=289, ProcessCount=11, AvailableMemoryMB=6128 2024-11-20T17:26:26,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
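The TableDescriptorChecker warning just above fires because this test run lowers the memstore flush threshold to 131072 bytes (128 KB), far below the 128 MB default, so that flushes happen constantly; the value can come either from the table descriptor's MEMSTORE_FLUSHSIZE attribute or from the `hbase.hregion.memstore.flush.size` property. A hedged sketch of the descriptor route (illustrative only; the log shows the resulting warning, not where the value was set):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeSketch {
  // Sets the per-region memstore flush size on the table descriptor, the same
  // 131072-byte value TableDescriptorChecker warns about in this log.
  static TableDescriptor withSmallFlushSize(TableName table) {
    return TableDescriptorBuilder.newBuilder(table)
        .setMemStoreFlushSize(128 * 1024L)   // 131072 bytes = 128 KB
        .build();
  }
}
```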
2024-11-20T17:26:26,092 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:26:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:26,093 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:26:26,093 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:26,093 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 115 2024-11-20T17:26:26,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-11-20T17:26:26,094 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:26:26,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742251_1427 (size=963) 2024-11-20T17:26:26,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-11-20T17:26:26,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-11-20T17:26:26,501 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff 2024-11-20T17:26:26,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742252_1428 (size=53) 2024-11-20T17:26:26,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-11-20T17:26:26,906 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:26:26,906 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing b3043adcc65f1dded05c4bfd9fcde44b, disabling compactions & flushes 2024-11-20T17:26:26,906 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:26,906 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:26,906 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. after waiting 0 ms 2024-11-20T17:26:26,906 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:26,906 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
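The create request on the preceding lines carries the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three single-version families A, B and C with 64 KB blocks, and the earlier TableDescriptorChecker warning shows the descriptor also overrides the memstore flush size to 131072 bytes (128 KB). A sketch of how an equivalent descriptor could be assembled with the client API; this is an illustration, not the test's actual setup code:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTestAcidGuarantees {
        static void createTable(Admin admin) throws IOException {
            TableDescriptorBuilder table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level metadata taken from the logged descriptor.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                // The 128 KB flush size the TableDescriptorChecker warned about.
                .setMemStoreFlushSize(131072L);
            for (String family : new String[] { "A", "B", "C" }) {
                table.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)     // VERSIONS => '1'
                    .setBlocksize(65536)   // BLOCKSIZE => '65536'
                    .build());
            }
            admin.createTable(table.build());  // drives the CreateTableProcedure (pid=115 above)
        }
    }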
2024-11-20T17:26:26,906 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:26,907 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:26:26,907 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732123586907"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123586907"}]},"ts":"1732123586907"} 2024-11-20T17:26:26,908 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:26:26,909 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:26:26,909 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123586909"}]},"ts":"1732123586909"} 2024-11-20T17:26:26,910 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T17:26:26,914 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b3043adcc65f1dded05c4bfd9fcde44b, ASSIGN}] 2024-11-20T17:26:26,914 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b3043adcc65f1dded05c4bfd9fcde44b, ASSIGN 2024-11-20T17:26:26,915 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=b3043adcc65f1dded05c4bfd9fcde44b, ASSIGN; state=OFFLINE, location=d514dc944523,44015,1732123455293; forceNewPlan=false, retain=false 2024-11-20T17:26:27,065 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=b3043adcc65f1dded05c4bfd9fcde44b, regionState=OPENING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:26:27,067 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; OpenRegionProcedure b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:26:27,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-11-20T17:26:27,218 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:27,220 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
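While the master assigns the new region, the client keeps polling MasterRpcServices ("Checking to see if procedure is done pid=115") until its create future completes. With the asynchronous Admin call this wait is explicit in user code; a hedged sketch, assuming an existing Admin handle and descriptor:

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public final class CreateAndWait {
        // Submit the create and wait for the master-side procedure to finish,
        // roughly what the HBaseAdmin$TableFuture in this log does internally.
        static void createAndWait(Admin admin, TableDescriptor desc) throws Exception {
            Future<Void> future = admin.createTableAsync(desc);
            future.get(5, TimeUnit.MINUTES);  // returns once e.g. "procId: 115 completed"
            TableName name = desc.getTableName();
            // Optional extra check that every region of the table is online.
            if (!admin.isTableAvailable(name)) {
                throw new IllegalStateException(name + " did not come online");
            }
        }
    }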
2024-11-20T17:26:27,221 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7285): Opening region: {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:26:27,221 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:27,221 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:26:27,221 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7327): checking encryption for b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:27,221 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7330): checking classloading for b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:27,222 INFO [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:27,223 INFO [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:26:27,223 INFO [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b3043adcc65f1dded05c4bfd9fcde44b columnFamilyName A 2024-11-20T17:26:27,224 DEBUG [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:27,224 INFO [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] regionserver.HStore(327): Store=b3043adcc65f1dded05c4bfd9fcde44b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:26:27,224 INFO [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:27,225 INFO [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:26:27,225 INFO [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b3043adcc65f1dded05c4bfd9fcde44b columnFamilyName B 2024-11-20T17:26:27,225 DEBUG [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:27,225 INFO [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] regionserver.HStore(327): Store=b3043adcc65f1dded05c4bfd9fcde44b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:26:27,225 INFO [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:27,226 INFO [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:26:27,226 INFO [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b3043adcc65f1dded05c4bfd9fcde44b columnFamilyName C 2024-11-20T17:26:27,226 DEBUG [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:27,227 INFO [StoreOpener-b3043adcc65f1dded05c4bfd9fcde44b-1 {}] regionserver.HStore(327): Store=b3043adcc65f1dded05c4bfd9fcde44b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:26:27,227 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:27,228 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:27,228 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:27,229 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:26:27,230 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1085): writing seq id for b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:27,231 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:26:27,232 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1102): Opened b3043adcc65f1dded05c4bfd9fcde44b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68262397, jitterRate=0.01718898117542267}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:26:27,232 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1001): Region open journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:27,233 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., pid=117, masterSystemTime=1732123587218 2024-11-20T17:26:27,234 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:27,234 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
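Once the region is open, the test fans out its reader and writer threads; each "ReadOnlyZKClient ... Connect 0x... to 127.0.0.1:56028" / "AbstractRpcClient ... Codec=...KeyValueCodec" pair on the following lines is one new client Connection being set up. A minimal sketch of that per-worker setup, assuming the same quorum; note that Connection is heavyweight and in normal applications is shared rather than opened per thread:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class WorkerConnection {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "56028"); // value from this run
            // Each createConnection() opens its own ZooKeeper client and RPC client,
            // which is what the repeated "Connect 0x..." lines below record.
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                System.out.println("connected to " + table.getName());
            }
        }
    }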
2024-11-20T17:26:27,234 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=b3043adcc65f1dded05c4bfd9fcde44b, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:26:27,236 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-20T17:26:27,236 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; OpenRegionProcedure b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 in 169 msec 2024-11-20T17:26:27,237 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=116, resume processing ppid=115 2024-11-20T17:26:27,237 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b3043adcc65f1dded05c4bfd9fcde44b, ASSIGN in 322 msec 2024-11-20T17:26:27,237 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:26:27,238 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123587237"}]},"ts":"1732123587237"} 2024-11-20T17:26:27,238 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T17:26:27,240 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:26:27,241 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1480 sec 2024-11-20T17:26:28,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-11-20T17:26:28,198 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 115 completed 2024-11-20T17:26:28,199 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38dd8644 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@466b85c9 2024-11-20T17:26:28,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@786b5809, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:28,203 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:28,204 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34218, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:28,204 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:26:28,205 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37912, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:26:28,206 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65e17c26 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f3ee89e 2024-11-20T17:26:28,210 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d375c60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:28,210 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53fc02ba to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b0e6a43 2024-11-20T17:26:28,212 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cbdf91e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:28,213 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2011d733 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8e5fd00 2024-11-20T17:26:28,215 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc3900b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:28,216 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x39b3baa5 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e195d6e 2024-11-20T17:26:28,218 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@599dd56e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:28,219 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x402e5def to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14088aa9 2024-11-20T17:26:28,222 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23090be3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:28,222 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x40302925 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b8d64d3 2024-11-20T17:26:28,225 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bb51dfc, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:28,226 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47ef9951 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@784d683 2024-11-20T17:26:28,228 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@540d7172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:28,229 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x567011a8 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7761f52b 2024-11-20T17:26:28,232 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48588c54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:28,233 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x02430fee to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a736a20 2024-11-20T17:26:28,235 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76c56316, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:28,236 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d567fc2 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c153822 2024-11-20T17:26:28,238 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41953565, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:28,242 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:28,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-20T17:26:28,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:26:28,243 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:28,243 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=118, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:28,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:28,248 DEBUG [hconnection-0x532c78fe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:28,249 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34226, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:28,249 DEBUG [hconnection-0x2631425b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:28,250 DEBUG [hconnection-0x23b4d05-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:28,250 DEBUG [hconnection-0x8b32d56-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:28,250 DEBUG [hconnection-0x5ec32a7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:28,250 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:28,250 DEBUG [hconnection-0x7cad2f44-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:28,251 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:28,251 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34242, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:28,251 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34250, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:28,251 DEBUG [hconnection-0x524bff33-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:28,251 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:28,252 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34272, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:28,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:28,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:26:28,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 
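The flush at 17:26:28,242 is an admin-initiated FlushTableProcedure, and while the CompactingMemStore snapshots stores A, B and C the writer threads immediately hit the RegionTooBusyException ("Over memstore limit=512.0 K") warnings that follow. A hedged sketch of both sides: triggering the flush through Admin and retrying a blocked Put with a simple backoff. The real HBase client already retries RegionTooBusyException internally, so the explicit loop is only for illustration; the row/qualifier mirror the "test_row_0/A:col10" key seen later in the log, and the value is a made-up placeholder.

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class FlushAndRetryPut {
        static final TableName TABLE = TableName.valueOf("TestAcidGuarantees");

        // Corresponds to the master.HMaster "flush TestAcidGuarantees" request (pid=118 above).
        static void flush(Admin admin) throws IOException {
            admin.flush(TABLE);
        }

        // Retry a put that is rejected while the region is over its memstore limit.
        static void putWithRetry(Table table, byte[] row) throws IOException, InterruptedException {
            Put put = new Put(row)
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 10) {
                        throw e;               // give up after a bounded number of attempts
                    }
                    Thread.sleep(100L * attempt);  // back off while the flush drains the memstore
                }
            }
        }
    }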
2024-11-20T17:26:28,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:28,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:28,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:28,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:28,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:28,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123648267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123648268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123648269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123648269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,275 DEBUG [hconnection-0x7222a51b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:28,276 DEBUG [hconnection-0x54c5e52e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:28,277 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34276, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:28,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123648278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,279 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34288, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:28,280 DEBUG [hconnection-0x211a9e92-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:28,281 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34292, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:28,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/bad865775069413696e5a7f3de1f6afb is 50, key is test_row_0/A:col10/1732123588254/Put/seqid=0 2024-11-20T17:26:28,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742253_1429 (size=12001) 2024-11-20T17:26:28,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:26:28,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123648370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123648370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123648371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123648371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123648381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,394 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:28,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:26:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:28,395 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:28,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:28,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:26:28,548 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:28,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:26:28,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:28,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:28,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:28,549 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:28,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:28,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:28,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123648574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123648574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123648575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123648575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123648584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,701 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:28,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:26:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:28,701 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:28,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:28,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/bad865775069413696e5a7f3de1f6afb 2024-11-20T17:26:28,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:28,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/447fd3115def45eaa1ce6f780b0b9452 is 50, key is test_row_0/B:col10/1732123588254/Put/seqid=0 2024-11-20T17:26:28,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742254_1430 (size=12001) 2024-11-20T17:26:28,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:26:28,853 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:28,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:26:28,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:28,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:28,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:28,854 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:28,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:28,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:28,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123648878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123648879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123648880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123648881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:28,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:28,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123648887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:29,005 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:29,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:26:29,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:29,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:29,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:29,006 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:29,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:29,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:29,139 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/447fd3115def45eaa1ce6f780b0b9452 2024-11-20T17:26:29,158 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:29,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:26:29,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:29,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:29,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:29,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:29,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:29,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:29,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/26aae5d815d3424296cc79d3a39cff2b is 50, key is test_row_0/C:col10/1732123588254/Put/seqid=0 2024-11-20T17:26:29,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742255_1431 (size=12001) 2024-11-20T17:26:29,310 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:29,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:26:29,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:29,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:29,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:29,311 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:29,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:29,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:29,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:26:29,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:29,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123649383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:29,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:29,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123649383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:29,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:29,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123649385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:29,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:29,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123649387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:29,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:29,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123649393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:29,463 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:29,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:26:29,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:29,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:29,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:29,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:29,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:29,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:29,570 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/26aae5d815d3424296cc79d3a39cff2b 2024-11-20T17:26:29,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/bad865775069413696e5a7f3de1f6afb as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/bad865775069413696e5a7f3de1f6afb 2024-11-20T17:26:29,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/bad865775069413696e5a7f3de1f6afb, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T17:26:29,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/447fd3115def45eaa1ce6f780b0b9452 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/447fd3115def45eaa1ce6f780b0b9452 2024-11-20T17:26:29,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/447fd3115def45eaa1ce6f780b0b9452, entries=150, sequenceid=15, 
filesize=11.7 K 2024-11-20T17:26:29,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/26aae5d815d3424296cc79d3a39cff2b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/26aae5d815d3424296cc79d3a39cff2b 2024-11-20T17:26:29,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/26aae5d815d3424296cc79d3a39cff2b, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T17:26:29,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=140.89 KB/144270 for b3043adcc65f1dded05c4bfd9fcde44b in 1331ms, sequenceid=15, compaction requested=false 2024-11-20T17:26:29,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:29,615 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:29,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T17:26:29,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:29,616 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:26:29,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:29,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:29,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:29,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:29,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:29,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:29,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/e48bc44672f74b898803e43fd1b59d62 is 50, key is test_row_0/A:col10/1732123588264/Put/seqid=0 2024-11-20T17:26:29,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742256_1432 (size=12001) 2024-11-20T17:26:29,628 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/e48bc44672f74b898803e43fd1b59d62 2024-11-20T17:26:29,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/fab796eee72e4adb8b0c7f42a4a84ff5 is 50, key is test_row_0/B:col10/1732123588264/Put/seqid=0 2024-11-20T17:26:29,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742257_1433 (size=12001) 2024-11-20T17:26:29,641 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), 
to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/fab796eee72e4adb8b0c7f42a4a84ff5 2024-11-20T17:26:29,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/14de6a676eeb41ffbcaa9d916a9a4fd6 is 50, key is test_row_0/C:col10/1732123588264/Put/seqid=0 2024-11-20T17:26:29,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742258_1434 (size=12001) 2024-11-20T17:26:30,053 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/14de6a676eeb41ffbcaa9d916a9a4fd6 2024-11-20T17:26:30,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/e48bc44672f74b898803e43fd1b59d62 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/e48bc44672f74b898803e43fd1b59d62 2024-11-20T17:26:30,061 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/e48bc44672f74b898803e43fd1b59d62, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T17:26:30,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/fab796eee72e4adb8b0c7f42a4a84ff5 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/fab796eee72e4adb8b0c7f42a4a84ff5 2024-11-20T17:26:30,066 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/fab796eee72e4adb8b0c7f42a4a84ff5, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T17:26:30,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/14de6a676eeb41ffbcaa9d916a9a4fd6 as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/14de6a676eeb41ffbcaa9d916a9a4fd6 2024-11-20T17:26:30,070 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/14de6a676eeb41ffbcaa9d916a9a4fd6, entries=150, sequenceid=39, filesize=11.7 K 2024-11-20T17:26:30,071 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for b3043adcc65f1dded05c4bfd9fcde44b in 455ms, sequenceid=39, compaction requested=false 2024-11-20T17:26:30,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:30,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:30,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-20T17:26:30,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-20T17:26:30,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-20T17:26:30,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8290 sec 2024-11-20T17:26:30,075 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.8320 sec 2024-11-20T17:26:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T17:26:30,347 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-20T17:26:30,348 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-20T17:26:30,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T17:26:30,350 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:30,350 INFO [PEWorker-1 {}] 
procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:30,350 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:30,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:30,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:26:30,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:30,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:30,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:30,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:30,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:30,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:30,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/643ac2ccae094a5d80ad7eed094b0252 is 50, key is test_row_0/A:col10/1732123590397/Put/seqid=0 2024-11-20T17:26:30,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742259_1435 (size=16681) 2024-11-20T17:26:30,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123650421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123650422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123650425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123650426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123650427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T17:26:30,502 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:30,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T17:26:30,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:30,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:30,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:30,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:30,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:30,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:30,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123650528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123650528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123650530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123650530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123650533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T17:26:30,654 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:30,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T17:26:30,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:30,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:30,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:30,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:30,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:30,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:30,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123650732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123650732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123650734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123650735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:30,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123650737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:30,807 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:30,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T17:26:30,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:30,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:30,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:30,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:30,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:30,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:30,811 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/643ac2ccae094a5d80ad7eed094b0252 2024-11-20T17:26:30,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/2e665a485f4a4ad8bd24e4b84674af29 is 50, key is test_row_0/B:col10/1732123590397/Put/seqid=0 2024-11-20T17:26:30,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742260_1436 (size=12001) 2024-11-20T17:26:30,825 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/2e665a485f4a4ad8bd24e4b84674af29 2024-11-20T17:26:30,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/6eaa1a8c8e2a4fb290c90f1ce4b0d372 is 50, key is test_row_0/C:col10/1732123590397/Put/seqid=0 2024-11-20T17:26:30,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742261_1437 (size=12001) 2024-11-20T17:26:30,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 
2024-11-20T17:26:30,960 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:30,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T17:26:30,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:30,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:30,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:30,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:30,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:30,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:31,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123651038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123651038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123651038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123651038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123651046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,113 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:31,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T17:26:31,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:31,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:31,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:31,114 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:31,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:31,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:31,222 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T17:26:31,237 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/6eaa1a8c8e2a4fb290c90f1ce4b0d372 2024-11-20T17:26:31,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/643ac2ccae094a5d80ad7eed094b0252 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/643ac2ccae094a5d80ad7eed094b0252 2024-11-20T17:26:31,245 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/643ac2ccae094a5d80ad7eed094b0252, entries=250, sequenceid=51, filesize=16.3 K 2024-11-20T17:26:31,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/2e665a485f4a4ad8bd24e4b84674af29 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/2e665a485f4a4ad8bd24e4b84674af29 2024-11-20T17:26:31,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/2e665a485f4a4ad8bd24e4b84674af29, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T17:26:31,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/6eaa1a8c8e2a4fb290c90f1ce4b0d372 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6eaa1a8c8e2a4fb290c90f1ce4b0d372 2024-11-20T17:26:31,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6eaa1a8c8e2a4fb290c90f1ce4b0d372, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T17:26:31,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b3043adcc65f1dded05c4bfd9fcde44b in 854ms, sequenceid=51, compaction requested=true 2024-11-20T17:26:31,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:31,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:31,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:31,254 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:31,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:31,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:31,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:31,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:31,254 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:31,255 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:31,255 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:31,255 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/B is initiating minor compaction (all files) 2024-11-20T17:26:31,255 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/A is initiating minor compaction (all files) 2024-11-20T17:26:31,255 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/B in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:31,255 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/A in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:31,255 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/447fd3115def45eaa1ce6f780b0b9452, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/fab796eee72e4adb8b0c7f42a4a84ff5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/2e665a485f4a4ad8bd24e4b84674af29] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=35.2 K 2024-11-20T17:26:31,255 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/bad865775069413696e5a7f3de1f6afb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/e48bc44672f74b898803e43fd1b59d62, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/643ac2ccae094a5d80ad7eed094b0252] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=39.7 K 2024-11-20T17:26:31,255 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting bad865775069413696e5a7f3de1f6afb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732123588254 2024-11-20T17:26:31,255 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 447fd3115def45eaa1ce6f780b0b9452, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732123588254 2024-11-20T17:26:31,256 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting fab796eee72e4adb8b0c7f42a4a84ff5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732123588264 2024-11-20T17:26:31,256 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting e48bc44672f74b898803e43fd1b59d62, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732123588264 2024-11-20T17:26:31,256 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e665a485f4a4ad8bd24e4b84674af29, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123590396 2024-11-20T17:26:31,256 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 643ac2ccae094a5d80ad7eed094b0252, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123590396 2024-11-20T17:26:31,263 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#B#compaction#363 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:31,263 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/075967393db04289ad365bbb74547112 is 50, key is test_row_0/B:col10/1732123590397/Put/seqid=0 2024-11-20T17:26:31,266 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#A#compaction#364 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:31,266 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/0d804aa774bb463a9673970bda8bf64a is 50, key is test_row_0/A:col10/1732123590397/Put/seqid=0 2024-11-20T17:26:31,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:31,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T17:26:31,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:31,271 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:26:31,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:31,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:31,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:31,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:31,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:31,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:31,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742262_1438 (size=12104) 2024-11-20T17:26:31,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742263_1439 (size=12104) 2024-11-20T17:26:31,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/acaadd3292e94a7ca7e426f580fc58a0 is 50, key is test_row_0/A:col10/1732123590425/Put/seqid=0 2024-11-20T17:26:31,281 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/0d804aa774bb463a9673970bda8bf64a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0d804aa774bb463a9673970bda8bf64a 2024-11-20T17:26:31,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742264_1440 (size=12001) 2024-11-20T17:26:31,287 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/acaadd3292e94a7ca7e426f580fc58a0 2024-11-20T17:26:31,287 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) 
file(s) in b3043adcc65f1dded05c4bfd9fcde44b/A of b3043adcc65f1dded05c4bfd9fcde44b into 0d804aa774bb463a9673970bda8bf64a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:31,287 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:31,287 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/A, priority=13, startTime=1732123591254; duration=0sec 2024-11-20T17:26:31,287 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:31,287 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:A 2024-11-20T17:26:31,287 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:31,289 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:31,289 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/C is initiating minor compaction (all files) 2024-11-20T17:26:31,289 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/C in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:31,289 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/26aae5d815d3424296cc79d3a39cff2b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/14de6a676eeb41ffbcaa9d916a9a4fd6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6eaa1a8c8e2a4fb290c90f1ce4b0d372] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=35.2 K 2024-11-20T17:26:31,290 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26aae5d815d3424296cc79d3a39cff2b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732123588254 2024-11-20T17:26:31,291 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14de6a676eeb41ffbcaa9d916a9a4fd6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732123588264 2024-11-20T17:26:31,291 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6eaa1a8c8e2a4fb290c90f1ce4b0d372, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123590396 2024-11-20T17:26:31,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/d84c9154f4de4048ab8fe1f7a06ded2d is 50, key is test_row_0/B:col10/1732123590425/Put/seqid=0 2024-11-20T17:26:31,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742265_1441 (size=12001) 2024-11-20T17:26:31,300 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#C#compaction#367 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:31,300 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/47724df206cf426aa5ffd7aa4310478b is 50, key is test_row_0/C:col10/1732123590397/Put/seqid=0 2024-11-20T17:26:31,301 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/d84c9154f4de4048ab8fe1f7a06ded2d 2024-11-20T17:26:31,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742266_1442 (size=12104) 2024-11-20T17:26:31,310 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/47724df206cf426aa5ffd7aa4310478b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/47724df206cf426aa5ffd7aa4310478b 2024-11-20T17:26:31,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/1e7e27358e65464dabbaefd5788df26e is 50, key is test_row_0/C:col10/1732123590425/Put/seqid=0 2024-11-20T17:26:31,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742267_1443 (size=12001) 2024-11-20T17:26:31,315 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/1e7e27358e65464dabbaefd5788df26e 2024-11-20T17:26:31,316 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/C of b3043adcc65f1dded05c4bfd9fcde44b into 47724df206cf426aa5ffd7aa4310478b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:31,316 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:31,316 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/C, priority=13, startTime=1732123591254; duration=0sec 2024-11-20T17:26:31,316 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:31,316 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:C 2024-11-20T17:26:31,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/acaadd3292e94a7ca7e426f580fc58a0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/acaadd3292e94a7ca7e426f580fc58a0 2024-11-20T17:26:31,323 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/acaadd3292e94a7ca7e426f580fc58a0, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T17:26:31,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/d84c9154f4de4048ab8fe1f7a06ded2d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/d84c9154f4de4048ab8fe1f7a06ded2d 2024-11-20T17:26:31,327 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/d84c9154f4de4048ab8fe1f7a06ded2d, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T17:26:31,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/1e7e27358e65464dabbaefd5788df26e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1e7e27358e65464dabbaefd5788df26e 2024-11-20T17:26:31,331 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1e7e27358e65464dabbaefd5788df26e, entries=150, sequenceid=75, filesize=11.7 K 2024-11-20T17:26:31,332 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for b3043adcc65f1dded05c4bfd9fcde44b in 61ms, sequenceid=75, compaction requested=false 2024-11-20T17:26:31,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:31,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:31,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-20T17:26:31,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-20T17:26:31,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-20T17:26:31,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 984 msec 2024-11-20T17:26:31,339 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 990 msec 2024-11-20T17:26:31,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T17:26:31,453 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-20T17:26:31,454 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:31,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-11-20T17:26:31,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T17:26:31,455 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:31,456 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:31,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, 
state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:31,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:31,555 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:26:31,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:31,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:31,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:31,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:31,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T17:26:31,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:31,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:31,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/59713f3970ac425cb58d98cd485c2f8a is 50, key is test_row_0/A:col10/1732123591551/Put/seqid=0 2024-11-20T17:26:31,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742268_1444 (size=16681) 2024-11-20T17:26:31,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123651582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123651586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123651587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123651587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123651588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,607 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:31,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T17:26:31,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:31,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:31,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:31,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:31,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:31,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:31,680 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/075967393db04289ad365bbb74547112 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/075967393db04289ad365bbb74547112 2024-11-20T17:26:31,684 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/B of b3043adcc65f1dded05c4bfd9fcde44b into 075967393db04289ad365bbb74547112(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:31,684 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:31,684 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/B, priority=13, startTime=1732123591254; duration=0sec 2024-11-20T17:26:31,684 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:31,684 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:B 2024-11-20T17:26:31,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123651689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123651693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123651693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123651693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123651698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T17:26:31,760 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:31,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T17:26:31,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:31,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:31,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:31,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:31,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:31,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:31,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123651895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123651896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123651896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123651896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:31,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123651904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:31,913 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:31,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T17:26:31,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:31,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:31,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:31,913 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:31,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:31,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:31,965 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/59713f3970ac425cb58d98cd485c2f8a 2024-11-20T17:26:31,972 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/539f8e96a39e4473906fcc37aa0c9946 is 50, key is test_row_0/B:col10/1732123591551/Put/seqid=0 2024-11-20T17:26:31,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742269_1445 (size=12001) 2024-11-20T17:26:32,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T17:26:32,065 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:32,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T17:26:32,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:32,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:32,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:32,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:32,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:32,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:32,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:32,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123652203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:32,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:32,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123652203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:32,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:32,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123652203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:32,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:32,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123652204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:32,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:32,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123652212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:32,218 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:32,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T17:26:32,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:32,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:32,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:32,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:32,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:32,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:32,370 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:32,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T17:26:32,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:32,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:32,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:32,371 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:32,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:32,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:32,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/539f8e96a39e4473906fcc37aa0c9946 2024-11-20T17:26:32,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/af83b453f6334c28a7280e20155c90f2 is 50, key is test_row_0/C:col10/1732123591551/Put/seqid=0 2024-11-20T17:26:32,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742270_1446 (size=12001) 2024-11-20T17:26:32,523 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:32,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T17:26:32,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:32,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:32,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:32,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:32,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:32,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:32,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T17:26:32,675 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:32,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T17:26:32,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:32,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:32,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:32,676 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:32,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:32,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:32,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:32,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123652705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:32,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:32,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123652707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:32,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:32,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123652710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:32,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:32,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123652711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:32,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:32,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123652718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:32,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/af83b453f6334c28a7280e20155c90f2 2024-11-20T17:26:32,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/59713f3970ac425cb58d98cd485c2f8a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/59713f3970ac425cb58d98cd485c2f8a 2024-11-20T17:26:32,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/59713f3970ac425cb58d98cd485c2f8a, entries=250, sequenceid=89, filesize=16.3 K 2024-11-20T17:26:32,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/539f8e96a39e4473906fcc37aa0c9946 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/539f8e96a39e4473906fcc37aa0c9946 2024-11-20T17:26:32,804 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/539f8e96a39e4473906fcc37aa0c9946, entries=150, sequenceid=89, filesize=11.7 K 2024-11-20T17:26:32,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/af83b453f6334c28a7280e20155c90f2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/af83b453f6334c28a7280e20155c90f2 2024-11-20T17:26:32,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/af83b453f6334c28a7280e20155c90f2, entries=150, sequenceid=89, filesize=11.7 K 2024-11-20T17:26:32,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for b3043adcc65f1dded05c4bfd9fcde44b in 1254ms, sequenceid=89, compaction requested=true 2024-11-20T17:26:32,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:32,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:32,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:32,809 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:32,809 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:32,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:32,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:32,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:32,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:32,810 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:32,810 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40786 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:32,810 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/A is initiating minor compaction (all files) 2024-11-20T17:26:32,810 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/B is initiating minor compaction (all files) 2024-11-20T17:26:32,810 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/A in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:32,810 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/B in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:32,810 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0d804aa774bb463a9673970bda8bf64a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/acaadd3292e94a7ca7e426f580fc58a0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/59713f3970ac425cb58d98cd485c2f8a] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=39.8 K 2024-11-20T17:26:32,810 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/075967393db04289ad365bbb74547112, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/d84c9154f4de4048ab8fe1f7a06ded2d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/539f8e96a39e4473906fcc37aa0c9946] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=35.3 K 2024-11-20T17:26:32,810 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d804aa774bb463a9673970bda8bf64a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123590396 2024-11-20T17:26:32,810 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 075967393db04289ad365bbb74547112, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123590396 2024-11-20T17:26:32,811 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting d84c9154f4de4048ab8fe1f7a06ded2d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732123590420 2024-11-20T17:26:32,811 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting acaadd3292e94a7ca7e426f580fc58a0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732123590420 2024-11-20T17:26:32,811 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59713f3970ac425cb58d98cd485c2f8a, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123591548 2024-11-20T17:26:32,811 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 539f8e96a39e4473906fcc37aa0c9946, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123591551 
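The "1 in ratio" wording in the ExploringCompactionPolicy lines above refers to the selection rule that no single candidate file may be larger than the combined size of the other candidates times the configured ratio. A minimal sketch of that check follows; the per-file byte sizes are hypothetical, chosen only so they add up to the logged 36106-byte selection for store B, and the 1.2 ratio is the usual default (this run's actual ratio setting is not shown in the log).

    // Sketch of an "files in ratio" check like the one the log lines above describe;
    // illustrative only, not copied from the HBase source.
    public class CompactionRatioSketch {
        // A selection is "in ratio" when every file is at most (sum of the other files) * ratio.
        static boolean filesInRatio(long[] fileSizes, double ratio) {
            long total = 0;
            for (long size : fileSizes) {
                total += size;
            }
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false; // one file dominates the rest, so this permutation is rejected
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Hypothetical byte sizes (~11.8 K, ~11.7 K, ~11.7 K) totalling 36106 bytes,
            // matching the "selected 3 files of size 36106" message for store B above.
            long[] storeB = {12086, 12010, 12010};
            System.out.println(filesInRatio(storeB, 1.2)); // prints true: all three files compact together
        }
    }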
2024-11-20T17:26:32,817 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#B#compaction#372 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:32,817 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/43c1a2dd4ad844439d5fb904b5101ee9 is 50, key is test_row_0/B:col10/1732123591551/Put/seqid=0 2024-11-20T17:26:32,817 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#A#compaction#373 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:32,818 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/1189d10b14944ec5846ec1ac4422bb07 is 50, key is test_row_0/A:col10/1732123591551/Put/seqid=0 2024-11-20T17:26:32,828 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:32,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-20T17:26:32,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
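The flush that follows is driven internally: the master dispatches a FlushRegionCallable to the regionserver (pid=123), and the compactions above are queued by the memstore flusher. For reference, the client-facing way to request the same operations is the Admin API; the sketch below is only illustrative and assumes a reachable cluster that already has the TestAcidGuarantees table.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                admin.flush(table);   // ask regionservers to flush the table's memstores to HFiles
                admin.compact(table); // request a minor compaction of the resulting store files
            }
        }
    }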
2024-11-20T17:26:32,829 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:26:32,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:32,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:32,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:32,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:32,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:32,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:32,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742271_1447 (size=12207) 2024-11-20T17:26:32,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742272_1448 (size=12207) 2024-11-20T17:26:32,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/9e71f89512314bb3a9e3d59d2c2941da is 50, key is test_row_0/A:col10/1732123591586/Put/seqid=0 2024-11-20T17:26:32,848 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/1189d10b14944ec5846ec1ac4422bb07 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/1189d10b14944ec5846ec1ac4422bb07 2024-11-20T17:26:32,848 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/43c1a2dd4ad844439d5fb904b5101ee9 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/43c1a2dd4ad844439d5fb904b5101ee9 2024-11-20T17:26:32,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742273_1449 (size=12001) 2024-11-20T17:26:32,854 INFO 
[RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/B of b3043adcc65f1dded05c4bfd9fcde44b into 43c1a2dd4ad844439d5fb904b5101ee9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:32,854 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:32,854 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/B, priority=13, startTime=1732123592809; duration=0sec 2024-11-20T17:26:32,855 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:32,855 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:B 2024-11-20T17:26:32,855 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:32,856 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:32,856 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/A of b3043adcc65f1dded05c4bfd9fcde44b into 1189d10b14944ec5846ec1ac4422bb07(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:32,856 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:32,856 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/C is initiating minor compaction (all files) 2024-11-20T17:26:32,856 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/A, priority=13, startTime=1732123592809; duration=0sec 2024-11-20T17:26:32,856 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/C in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:32,856 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:32,856 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:A 2024-11-20T17:26:32,856 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/47724df206cf426aa5ffd7aa4310478b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1e7e27358e65464dabbaefd5788df26e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/af83b453f6334c28a7280e20155c90f2] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=35.3 K 2024-11-20T17:26:32,858 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 47724df206cf426aa5ffd7aa4310478b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123590396 2024-11-20T17:26:32,858 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e7e27358e65464dabbaefd5788df26e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732123590420 2024-11-20T17:26:32,858 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting af83b453f6334c28a7280e20155c90f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123591551 2024-11-20T17:26:32,865 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#C#compaction#375 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:32,866 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/abc4c70b0d85427fb3dd3251f1f1995b is 50, key is test_row_0/C:col10/1732123591551/Put/seqid=0 2024-11-20T17:26:32,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742274_1450 (size=12207) 2024-11-20T17:26:33,251 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/9e71f89512314bb3a9e3d59d2c2941da 2024-11-20T17:26:33,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/bd5ff5b70ec44098a19609fa52ae713b is 50, key is test_row_0/B:col10/1732123591586/Put/seqid=0 2024-11-20T17:26:33,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742275_1451 (size=12001) 2024-11-20T17:26:33,280 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/abc4c70b0d85427fb3dd3251f1f1995b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/abc4c70b0d85427fb3dd3251f1f1995b 2024-11-20T17:26:33,284 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/C of b3043adcc65f1dded05c4bfd9fcde44b into abc4c70b0d85427fb3dd3251f1f1995b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
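The RegionTooBusyException warnings earlier in this run, and the ones that resume below once the region is flagged as already flushing, come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit. In a stock configuration that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the unusually small 512 K figure here presumably reflects the deliberately tight settings this test runs with. A minimal sketch with hypothetical values chosen only so the arithmetic reproduces 512 K:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Hypothetical test-style settings; not read from this run's configuration.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // per-region flush threshold (~128 K)
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x the flush size
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            // With the values above this prints 512, matching the "Over memstore limit=512.0 K" messages.
            System.out.println("Writes block above ~" + (blockingLimit / 1024) + " K per region");
        }
    }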
2024-11-20T17:26:33,284 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:33,284 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/C, priority=13, startTime=1732123592809; duration=0sec 2024-11-20T17:26:33,285 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:33,285 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:C 2024-11-20T17:26:33,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T17:26:33,661 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/bd5ff5b70ec44098a19609fa52ae713b 2024-11-20T17:26:33,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/a1d2b0f752df4950a219b2df75c801d0 is 50, key is test_row_0/C:col10/1732123591586/Put/seqid=0 2024-11-20T17:26:33,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742276_1452 (size=12001) 2024-11-20T17:26:33,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:33,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:33,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:33,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123653748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:33,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:33,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123653753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:33,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:33,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123653754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:33,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:33,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123653755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:33,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:33,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123653755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:33,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:33,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123653856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:33,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:33,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123653859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:33,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:33,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123653859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:33,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:33,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123653859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:33,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:33,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123653859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123654059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123654063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123654063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123654063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123654064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,072 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/a1d2b0f752df4950a219b2df75c801d0 2024-11-20T17:26:34,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/9e71f89512314bb3a9e3d59d2c2941da as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/9e71f89512314bb3a9e3d59d2c2941da 2024-11-20T17:26:34,079 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/9e71f89512314bb3a9e3d59d2c2941da, entries=150, sequenceid=114, filesize=11.7 K 2024-11-20T17:26:34,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/bd5ff5b70ec44098a19609fa52ae713b as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/bd5ff5b70ec44098a19609fa52ae713b 2024-11-20T17:26:34,083 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/bd5ff5b70ec44098a19609fa52ae713b, entries=150, sequenceid=114, filesize=11.7 K 2024-11-20T17:26:34,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/a1d2b0f752df4950a219b2df75c801d0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/a1d2b0f752df4950a219b2df75c801d0 2024-11-20T17:26:34,087 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/a1d2b0f752df4950a219b2df75c801d0, entries=150, sequenceid=114, filesize=11.7 K 2024-11-20T17:26:34,087 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for b3043adcc65f1dded05c4bfd9fcde44b in 1258ms, sequenceid=114, compaction requested=false 2024-11-20T17:26:34,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:34,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:34,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-20T17:26:34,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-11-20T17:26:34,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-20T17:26:34,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6330 sec 2024-11-20T17:26:34,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 2.6360 sec 2024-11-20T17:26:34,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:34,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:26:34,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:34,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:34,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:34,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:34,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:34,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:34,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/6a279dbd22c54096ba99ce21422e5f88 is 50, key is test_row_0/A:col10/1732123594365/Put/seqid=0 2024-11-20T17:26:34,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742277_1453 (size=12101) 2024-11-20T17:26:34,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123654388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123654389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123654390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123654391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123654396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123654497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123654497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123654498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123654497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123654502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123654705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123654706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123654706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123654706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:34,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123654706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:34,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/6a279dbd22c54096ba99ce21422e5f88 2024-11-20T17:26:34,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/df0b77be78c24a42bc9765b0e616a6c4 is 50, key is test_row_0/B:col10/1732123594365/Put/seqid=0 2024-11-20T17:26:34,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742278_1454 (size=12101) 2024-11-20T17:26:35,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:35,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123655011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:35,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:35,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123655011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:35,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:35,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123655012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:35,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:35,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123655013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:35,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:35,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123655013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:35,189 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/df0b77be78c24a42bc9765b0e616a6c4 2024-11-20T17:26:35,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/e7e0f63d5bb34e19a4935d90604bec8f is 50, key is test_row_0/C:col10/1732123594365/Put/seqid=0 2024-11-20T17:26:35,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742279_1455 (size=12101) 2024-11-20T17:26:35,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:35,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123655514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:35,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:35,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123655517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:35,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:35,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123655518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:35,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:35,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123655518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:35,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:35,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123655518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:35,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T17:26:35,560 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-20T17:26:35,561 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:35,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-11-20T17:26:35,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T17:26:35,562 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:35,563 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:35,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:35,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at 
sequenceid=131 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/e7e0f63d5bb34e19a4935d90604bec8f 2024-11-20T17:26:35,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/6a279dbd22c54096ba99ce21422e5f88 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/6a279dbd22c54096ba99ce21422e5f88 2024-11-20T17:26:35,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/6a279dbd22c54096ba99ce21422e5f88, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T17:26:35,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/df0b77be78c24a42bc9765b0e616a6c4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/df0b77be78c24a42bc9765b0e616a6c4 2024-11-20T17:26:35,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/df0b77be78c24a42bc9765b0e616a6c4, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T17:26:35,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/e7e0f63d5bb34e19a4935d90604bec8f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/e7e0f63d5bb34e19a4935d90604bec8f 2024-11-20T17:26:35,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/e7e0f63d5bb34e19a4935d90604bec8f, entries=150, sequenceid=131, filesize=11.8 K 2024-11-20T17:26:35,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for b3043adcc65f1dded05c4bfd9fcde44b in 1251ms, sequenceid=131, compaction requested=true 2024-11-20T17:26:35,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:35,618 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:35,618 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:35,618 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:35,618 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:35,618 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:35,619 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/B is initiating minor compaction (all files) 2024-11-20T17:26:35,619 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/A is initiating minor compaction (all files) 2024-11-20T17:26:35,619 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/B in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:35,619 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/A in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:35,619 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/43c1a2dd4ad844439d5fb904b5101ee9, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/bd5ff5b70ec44098a19609fa52ae713b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/df0b77be78c24a42bc9765b0e616a6c4] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=35.5 K 2024-11-20T17:26:35,619 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/1189d10b14944ec5846ec1ac4422bb07, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/9e71f89512314bb3a9e3d59d2c2941da, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/6a279dbd22c54096ba99ce21422e5f88] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=35.5 K 2024-11-20T17:26:35,619 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 43c1a2dd4ad844439d5fb904b5101ee9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123591551 2024-11-20T17:26:35,619 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1189d10b14944ec5846ec1ac4422bb07, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123591551 2024-11-20T17:26:35,619 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting bd5ff5b70ec44098a19609fa52ae713b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732123591584 2024-11-20T17:26:35,619 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e71f89512314bb3a9e3d59d2c2941da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732123591584 2024-11-20T17:26:35,620 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting df0b77be78c24a42bc9765b0e616a6c4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123593752 2024-11-20T17:26:35,620 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a279dbd22c54096ba99ce21422e5f88, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123593752 2024-11-20T17:26:35,626 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#A#compaction#382 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:35,626 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#B#compaction#381 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:35,627 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/b71f3e09019d4ef8893b5c4029da1fae is 50, key is test_row_0/B:col10/1732123594365/Put/seqid=0 2024-11-20T17:26:35,627 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/28d1d18f3ef84baf85aac468f848aa08 is 50, key is test_row_0/A:col10/1732123594365/Put/seqid=0 2024-11-20T17:26:35,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742280_1456 (size=12409) 2024-11-20T17:26:35,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742281_1457 (size=12409) 2024-11-20T17:26:35,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T17:26:35,714 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:35,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-20T17:26:35,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:35,715 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T17:26:35,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:35,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:35,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:35,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:35,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:35,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:35,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/29868b73f0134f569e6868dc13f7c17a is 50, key is test_row_0/A:col10/1732123594389/Put/seqid=0 2024-11-20T17:26:35,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742282_1458 (size=12151) 2024-11-20T17:26:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T17:26:36,038 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/28d1d18f3ef84baf85aac468f848aa08 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/28d1d18f3ef84baf85aac468f848aa08 2024-11-20T17:26:36,042 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/b71f3e09019d4ef8893b5c4029da1fae as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b71f3e09019d4ef8893b5c4029da1fae 2024-11-20T17:26:36,043 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/A of b3043adcc65f1dded05c4bfd9fcde44b into 
28d1d18f3ef84baf85aac468f848aa08(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:36,043 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:36,043 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/A, priority=13, startTime=1732123595617; duration=0sec 2024-11-20T17:26:36,043 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:36,043 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:A 2024-11-20T17:26:36,043 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:36,044 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:36,044 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/C is initiating minor compaction (all files) 2024-11-20T17:26:36,044 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/C in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:36,044 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/abc4c70b0d85427fb3dd3251f1f1995b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/a1d2b0f752df4950a219b2df75c801d0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/e7e0f63d5bb34e19a4935d90604bec8f] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=35.5 K 2024-11-20T17:26:36,044 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting abc4c70b0d85427fb3dd3251f1f1995b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732123591551 2024-11-20T17:26:36,045 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1d2b0f752df4950a219b2df75c801d0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732123591584 2024-11-20T17:26:36,045 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7e0f63d5bb34e19a4935d90604bec8f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123593752 2024-11-20T17:26:36,046 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/B of b3043adcc65f1dded05c4bfd9fcde44b into b71f3e09019d4ef8893b5c4029da1fae(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:36,046 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:36,046 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/B, priority=13, startTime=1732123595618; duration=0sec 2024-11-20T17:26:36,047 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:36,047 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:B 2024-11-20T17:26:36,055 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#C#compaction#384 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:36,056 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/1d9d2b36e252483497378b667a6e188b is 50, key is test_row_0/C:col10/1732123594365/Put/seqid=0 2024-11-20T17:26:36,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742283_1459 (size=12409) 2024-11-20T17:26:36,123 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/29868b73f0134f569e6868dc13f7c17a 2024-11-20T17:26:36,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/792e56ea7de54fa4b9cf9e961087a1ab is 50, key is test_row_0/B:col10/1732123594389/Put/seqid=0 2024-11-20T17:26:36,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742284_1460 (size=12151) 2024-11-20T17:26:36,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T17:26:36,465 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/1d9d2b36e252483497378b667a6e188b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1d9d2b36e252483497378b667a6e188b 2024-11-20T17:26:36,469 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/C of b3043adcc65f1dded05c4bfd9fcde44b into 1d9d2b36e252483497378b667a6e188b(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:36,469 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:36,469 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/C, priority=13, startTime=1732123595618; duration=0sec 2024-11-20T17:26:36,469 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:36,469 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:C 2024-11-20T17:26:36,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:36,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:36,537 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/792e56ea7de54fa4b9cf9e961087a1ab 2024-11-20T17:26:36,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123656533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123656535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123656536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123656538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/b9d9a6c7362a4deaa940f0cbcb375097 is 50, key is test_row_0/C:col10/1732123594389/Put/seqid=0 2024-11-20T17:26:36,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123656539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742285_1461 (size=12151) 2024-11-20T17:26:36,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123656639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123656640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123656643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123656643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123656647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T17:26:36,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123656843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,847 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123656844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123656849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123656849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:36,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123656851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:36,953 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/b9d9a6c7362a4deaa940f0cbcb375097 2024-11-20T17:26:36,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/29868b73f0134f569e6868dc13f7c17a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/29868b73f0134f569e6868dc13f7c17a 2024-11-20T17:26:36,961 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/29868b73f0134f569e6868dc13f7c17a, entries=150, sequenceid=153, filesize=11.9 K 2024-11-20T17:26:36,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/792e56ea7de54fa4b9cf9e961087a1ab as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/792e56ea7de54fa4b9cf9e961087a1ab 2024-11-20T17:26:36,964 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/792e56ea7de54fa4b9cf9e961087a1ab, entries=150, sequenceid=153, filesize=11.9 K 2024-11-20T17:26:36,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/b9d9a6c7362a4deaa940f0cbcb375097 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/b9d9a6c7362a4deaa940f0cbcb375097 2024-11-20T17:26:36,969 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/b9d9a6c7362a4deaa940f0cbcb375097, entries=150, sequenceid=153, filesize=11.9 K 2024-11-20T17:26:36,970 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for b3043adcc65f1dded05c4bfd9fcde44b in 1255ms, sequenceid=153, compaction requested=false 2024-11-20T17:26:36,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:36,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:36,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-11-20T17:26:36,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-11-20T17:26:36,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-20T17:26:36,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4080 sec 2024-11-20T17:26:36,973 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.4120 sec 2024-11-20T17:26:37,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:37,152 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T17:26:37,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:37,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:37,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:37,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:37,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:37,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:37,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/3448538a381540faa3f09f4d884fb5a2 is 50, key is test_row_0/A:col10/1732123597150/Put/seqid=0 2024-11-20T17:26:37,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742286_1462 (size=14541) 2024-11-20T17:26:37,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/3448538a381540faa3f09f4d884fb5a2 2024-11-20T17:26:37,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/a86c73a2162d40b28eefe96bc8777ebd is 50, key is test_row_0/B:col10/1732123597150/Put/seqid=0 2024-11-20T17:26:37,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123657178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123657178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123657179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123657180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123657183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742287_1463 (size=12151) 2024-11-20T17:26:37,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/a86c73a2162d40b28eefe96bc8777ebd 2024-11-20T17:26:37,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/da7edcc919394bdcb9cf905ccd0a9f58 is 50, key is test_row_0/C:col10/1732123597150/Put/seqid=0 2024-11-20T17:26:37,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742288_1464 (size=12151) 2024-11-20T17:26:37,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123657287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123657287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123657287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123657287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123657287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123657492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123657494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123657494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123657494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123657495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/da7edcc919394bdcb9cf905ccd0a9f58 2024-11-20T17:26:37,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/3448538a381540faa3f09f4d884fb5a2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/3448538a381540faa3f09f4d884fb5a2 2024-11-20T17:26:37,610 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/3448538a381540faa3f09f4d884fb5a2, entries=200, sequenceid=172, filesize=14.2 K 2024-11-20T17:26:37,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/a86c73a2162d40b28eefe96bc8777ebd as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/a86c73a2162d40b28eefe96bc8777ebd 2024-11-20T17:26:37,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/a86c73a2162d40b28eefe96bc8777ebd, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T17:26:37,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/da7edcc919394bdcb9cf905ccd0a9f58 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/da7edcc919394bdcb9cf905ccd0a9f58 2024-11-20T17:26:37,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/da7edcc919394bdcb9cf905ccd0a9f58, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T17:26:37,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for b3043adcc65f1dded05c4bfd9fcde44b in 467ms, sequenceid=172, compaction requested=true 2024-11-20T17:26:37,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:37,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:37,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:37,619 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:37,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:37,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:37,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:37,619 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:37,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:37,620 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39101 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:37,620 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/A is initiating minor 
compaction (all files) 2024-11-20T17:26:37,620 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/A in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:37,620 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:37,620 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/28d1d18f3ef84baf85aac468f848aa08, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/29868b73f0134f569e6868dc13f7c17a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/3448538a381540faa3f09f4d884fb5a2] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=38.2 K 2024-11-20T17:26:37,620 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/B is initiating minor compaction (all files) 2024-11-20T17:26:37,620 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/B in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:37,621 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b71f3e09019d4ef8893b5c4029da1fae, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/792e56ea7de54fa4b9cf9e961087a1ab, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/a86c73a2162d40b28eefe96bc8777ebd] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=35.9 K 2024-11-20T17:26:37,621 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28d1d18f3ef84baf85aac468f848aa08, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123593752 2024-11-20T17:26:37,621 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting b71f3e09019d4ef8893b5c4029da1fae, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123593752 2024-11-20T17:26:37,621 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29868b73f0134f569e6868dc13f7c17a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732123594386 2024-11-20T17:26:37,621 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 792e56ea7de54fa4b9cf9e961087a1ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732123594386 2024-11-20T17:26:37,622 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3448538a381540faa3f09f4d884fb5a2, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123596531 2024-11-20T17:26:37,622 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting a86c73a2162d40b28eefe96bc8777ebd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123596531 2024-11-20T17:26:37,627 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#A#compaction#390 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:37,628 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/73ad2385950c492c948fa46d263b381e is 50, key is test_row_0/A:col10/1732123597150/Put/seqid=0 2024-11-20T17:26:37,634 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#B#compaction#391 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:37,634 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/b3cacc9139fc4ae0adab85bd8f3e834d is 50, key is test_row_0/B:col10/1732123597150/Put/seqid=0 2024-11-20T17:26:37,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742289_1465 (size=12561) 2024-11-20T17:26:37,641 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/73ad2385950c492c948fa46d263b381e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/73ad2385950c492c948fa46d263b381e 2024-11-20T17:26:37,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742290_1466 (size=12561) 2024-11-20T17:26:37,646 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/A of b3043adcc65f1dded05c4bfd9fcde44b into 73ad2385950c492c948fa46d263b381e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:37,646 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:37,646 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/A, priority=13, startTime=1732123597619; duration=0sec 2024-11-20T17:26:37,646 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:37,646 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:A 2024-11-20T17:26:37,646 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:37,648 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:37,648 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/C is initiating minor compaction (all files) 2024-11-20T17:26:37,648 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/C in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:37,648 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1d9d2b36e252483497378b667a6e188b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/b9d9a6c7362a4deaa940f0cbcb375097, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/da7edcc919394bdcb9cf905ccd0a9f58] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=35.9 K 2024-11-20T17:26:37,649 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d9d2b36e252483497378b667a6e188b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732123593752 2024-11-20T17:26:37,649 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9d9a6c7362a4deaa940f0cbcb375097, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732123594386 2024-11-20T17:26:37,649 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting da7edcc919394bdcb9cf905ccd0a9f58, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123596531 2024-11-20T17:26:37,651 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/b3cacc9139fc4ae0adab85bd8f3e834d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b3cacc9139fc4ae0adab85bd8f3e834d 2024-11-20T17:26:37,655 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/B of b3043adcc65f1dded05c4bfd9fcde44b into b3cacc9139fc4ae0adab85bd8f3e834d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:37,655 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:37,656 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/B, priority=13, startTime=1732123597619; duration=0sec 2024-11-20T17:26:37,656 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:37,656 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:B 2024-11-20T17:26:37,657 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#C#compaction#392 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:37,657 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/6185a8f6ead54668ae9d5c1aa8742059 is 50, key is test_row_0/C:col10/1732123597150/Put/seqid=0 2024-11-20T17:26:37,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742291_1467 (size=12561) 2024-11-20T17:26:37,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-20T17:26:37,666 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-20T17:26:37,666 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/6185a8f6ead54668ae9d5c1aa8742059 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6185a8f6ead54668ae9d5c1aa8742059 2024-11-20T17:26:37,667 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:37,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-20T17:26:37,669 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:37,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T17:26:37,670 INFO [PEWorker-1 {}] 
procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:37,670 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:37,670 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/C of b3043adcc65f1dded05c4bfd9fcde44b into 6185a8f6ead54668ae9d5c1aa8742059(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:37,670 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:37,670 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/C, priority=13, startTime=1732123597619; duration=0sec 2024-11-20T17:26:37,670 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:37,670 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:C 2024-11-20T17:26:37,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T17:26:37,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:37,799 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T17:26:37,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:37,800 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:37,804 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/f083441f383b4df983b5265b77bcea82 is 50, key is test_row_0/A:col10/1732123597179/Put/seqid=0 2024-11-20T17:26:37,807 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742292_1468 (size=12151) 2024-11-20T17:26:37,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123657810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123657810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123657811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123657812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123657816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,821 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:37,822 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T17:26:37,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:37,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:37,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:37,822 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:37,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:37,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123657917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123657917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123657918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123657918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:37,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123657920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:37,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T17:26:37,974 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:37,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T17:26:37,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:37,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:37,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:37,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:37,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:37,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123658121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123658122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123658122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123658123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123658124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,127 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:38,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T17:26:38,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:38,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:38,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:38,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:38,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/f083441f383b4df983b5265b77bcea82 2024-11-20T17:26:38,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/691cc1b29d9b4635b56012a7e44fa1d8 is 50, key is test_row_0/B:col10/1732123597179/Put/seqid=0 2024-11-20T17:26:38,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742293_1469 (size=12151) 2024-11-20T17:26:38,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T17:26:38,279 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:38,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T17:26:38,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:38,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:38,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:38,280 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123658426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123658426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123658427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123658427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123658427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,432 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:38,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T17:26:38,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:38,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:38,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:38,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:38,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,585 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:38,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T17:26:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:38,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:38,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/691cc1b29d9b4635b56012a7e44fa1d8 2024-11-20T17:26:38,626 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/5447b24835a94d36a1612f15050f8cf9 is 50, key is test_row_0/C:col10/1732123597179/Put/seqid=0 2024-11-20T17:26:38,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742294_1470 (size=12151) 2024-11-20T17:26:38,737 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:38,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T17:26:38,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:38,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:38,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:38,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:38,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T17:26:38,890 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:38,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T17:26:38,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:38,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:38,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:38,890 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:38,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123658932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123658933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123658934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123658934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:38,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:38,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123658937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:39,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/5447b24835a94d36a1612f15050f8cf9 2024-11-20T17:26:39,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/f083441f383b4df983b5265b77bcea82 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f083441f383b4df983b5265b77bcea82 2024-11-20T17:26:39,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f083441f383b4df983b5265b77bcea82, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T17:26:39,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/691cc1b29d9b4635b56012a7e44fa1d8 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/691cc1b29d9b4635b56012a7e44fa1d8 2024-11-20T17:26:39,040 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/691cc1b29d9b4635b56012a7e44fa1d8, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T17:26:39,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/5447b24835a94d36a1612f15050f8cf9 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/5447b24835a94d36a1612f15050f8cf9 2024-11-20T17:26:39,042 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 
2024-11-20T17:26:39,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T17:26:39,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:39,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:39,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:39,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:39,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:39,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:39,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/5447b24835a94d36a1612f15050f8cf9, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T17:26:39,045 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for b3043adcc65f1dded05c4bfd9fcde44b in 1246ms, sequenceid=196, compaction requested=false 2024-11-20T17:26:39,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:39,195 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:39,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-20T17:26:39,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:39,196 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T17:26:39,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:39,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:39,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:39,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:39,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:39,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:39,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/64974ff8aefb499cbd48f78578fd16b3 is 50, key is test_row_0/A:col10/1732123597815/Put/seqid=0 2024-11-20T17:26:39,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742295_1471 (size=12151) 2024-11-20T17:26:39,604 INFO 
[RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/64974ff8aefb499cbd48f78578fd16b3 2024-11-20T17:26:39,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/7a46af784b8247489bdeb8ea0ebbd5f3 is 50, key is test_row_0/B:col10/1732123597815/Put/seqid=0 2024-11-20T17:26:39,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742296_1472 (size=12151) 2024-11-20T17:26:39,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T17:26:39,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:39,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:39,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123659962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:39,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123659965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:39,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123659965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:39,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123659967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:39,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123659968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,015 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/7a46af784b8247489bdeb8ea0ebbd5f3 2024-11-20T17:26:40,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/112230080a17451bab6f8093ec3ed0fc is 50, key is test_row_0/C:col10/1732123597815/Put/seqid=0 2024-11-20T17:26:40,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742297_1473 (size=12151) 2024-11-20T17:26:40,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123660069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123660070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123660071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123660075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123660075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123660274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123660275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123660275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123660279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123660280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,426 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/112230080a17451bab6f8093ec3ed0fc 2024-11-20T17:26:40,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/64974ff8aefb499cbd48f78578fd16b3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/64974ff8aefb499cbd48f78578fd16b3 2024-11-20T17:26:40,434 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/64974ff8aefb499cbd48f78578fd16b3, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T17:26:40,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/7a46af784b8247489bdeb8ea0ebbd5f3 as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/7a46af784b8247489bdeb8ea0ebbd5f3 2024-11-20T17:26:40,438 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/7a46af784b8247489bdeb8ea0ebbd5f3, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T17:26:40,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/112230080a17451bab6f8093ec3ed0fc as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/112230080a17451bab6f8093ec3ed0fc 2024-11-20T17:26:40,442 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/112230080a17451bab6f8093ec3ed0fc, entries=150, sequenceid=211, filesize=11.9 K 2024-11-20T17:26:40,443 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for b3043adcc65f1dded05c4bfd9fcde44b in 1246ms, sequenceid=211, compaction requested=true 2024-11-20T17:26:40,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:40,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:40,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-20T17:26:40,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-20T17:26:40,445 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-20T17:26:40,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7740 sec 2024-11-20T17:26:40,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 2.7790 sec 2024-11-20T17:26:40,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:40,584 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T17:26:40,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:40,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:40,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:40,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:40,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:40,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:40,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/67552e37d35d42df8103b5e06fa40f8e is 50, key is test_row_0/A:col10/1732123599966/Put/seqid=0 2024-11-20T17:26:40,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742298_1474 (size=16931) 2024-11-20T17:26:40,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123660593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123660594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123660594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123660597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123660600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123660701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123660701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123660701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123660701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123660704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123660903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123660903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123660904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123660905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:40,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123660910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:40,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/67552e37d35d42df8103b5e06fa40f8e 2024-11-20T17:26:41,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/9303ab5b7da44a35984e0de9a1a98db4 is 50, key is test_row_0/B:col10/1732123599966/Put/seqid=0 2024-11-20T17:26:41,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742299_1475 (size=12151) 2024-11-20T17:26:41,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:41,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123661207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:41,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:41,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123661209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:41,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:41,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123661210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:41,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:41,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123661210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:41,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:41,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123661216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:41,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/9303ab5b7da44a35984e0de9a1a98db4 2024-11-20T17:26:41,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/733bab60f2fc4b80b4f51c9677a72f88 is 50, key is test_row_0/C:col10/1732123599966/Put/seqid=0 2024-11-20T17:26:41,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742300_1476 (size=12151) 2024-11-20T17:26:41,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:41,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123661711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:41,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:41,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123661713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:41,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:41,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123661715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:41,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:41,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123661718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:41,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:41,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123661725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:41,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T17:26:41,774 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-20T17:26:41,775 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:41,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-20T17:26:41,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T17:26:41,777 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:41,777 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:41,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:41,822 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at 
sequenceid=234 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/733bab60f2fc4b80b4f51c9677a72f88 2024-11-20T17:26:41,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/67552e37d35d42df8103b5e06fa40f8e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/67552e37d35d42df8103b5e06fa40f8e 2024-11-20T17:26:41,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/67552e37d35d42df8103b5e06fa40f8e, entries=250, sequenceid=234, filesize=16.5 K 2024-11-20T17:26:41,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/9303ab5b7da44a35984e0de9a1a98db4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/9303ab5b7da44a35984e0de9a1a98db4 2024-11-20T17:26:41,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/9303ab5b7da44a35984e0de9a1a98db4, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T17:26:41,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/733bab60f2fc4b80b4f51c9677a72f88 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/733bab60f2fc4b80b4f51c9677a72f88 2024-11-20T17:26:41,837 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/733bab60f2fc4b80b4f51c9677a72f88, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T17:26:41,837 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for b3043adcc65f1dded05c4bfd9fcde44b in 1253ms, sequenceid=234, compaction requested=true 2024-11-20T17:26:41,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:41,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:41,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:41,838 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:41,838 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:41,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:41,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:41,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:26:41,838 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:41,839 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53794 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:41,839 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/A is initiating minor compaction (all files) 2024-11-20T17:26:41,839 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/A in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:41,839 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/73ad2385950c492c948fa46d263b381e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f083441f383b4df983b5265b77bcea82, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/64974ff8aefb499cbd48f78578fd16b3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/67552e37d35d42df8103b5e06fa40f8e] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=52.5 K 2024-11-20T17:26:41,839 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:41,839 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/B is initiating minor compaction (all files) 2024-11-20T17:26:41,839 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/B in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:41,839 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b3cacc9139fc4ae0adab85bd8f3e834d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/691cc1b29d9b4635b56012a7e44fa1d8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/7a46af784b8247489bdeb8ea0ebbd5f3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/9303ab5b7da44a35984e0de9a1a98db4] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=47.9 K 2024-11-20T17:26:41,840 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73ad2385950c492c948fa46d263b381e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123596531 2024-11-20T17:26:41,840 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting b3cacc9139fc4ae0adab85bd8f3e834d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123596531 2024-11-20T17:26:41,840 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting f083441f383b4df983b5265b77bcea82, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, 
earliestPutTs=1732123597178 2024-11-20T17:26:41,840 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 691cc1b29d9b4635b56012a7e44fa1d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732123597178 2024-11-20T17:26:41,840 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64974ff8aefb499cbd48f78578fd16b3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732123597810 2024-11-20T17:26:41,841 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a46af784b8247489bdeb8ea0ebbd5f3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732123597810 2024-11-20T17:26:41,841 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67552e37d35d42df8103b5e06fa40f8e, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732123599964 2024-11-20T17:26:41,841 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 9303ab5b7da44a35984e0de9a1a98db4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732123599966 2024-11-20T17:26:41,849 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#A#compaction#402 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:41,849 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#B#compaction#403 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:41,850 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/245fd2601e8f421a8b93ee3df3b2242a is 50, key is test_row_0/A:col10/1732123599966/Put/seqid=0 2024-11-20T17:26:41,850 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/b9260e2fb066434496b7e68fd25a0e79 is 50, key is test_row_0/B:col10/1732123599966/Put/seqid=0 2024-11-20T17:26:41,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742302_1478 (size=12697) 2024-11-20T17:26:41,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742301_1477 (size=12697) 2024-11-20T17:26:41,872 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/b9260e2fb066434496b7e68fd25a0e79 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b9260e2fb066434496b7e68fd25a0e79 2024-11-20T17:26:41,872 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/245fd2601e8f421a8b93ee3df3b2242a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/245fd2601e8f421a8b93ee3df3b2242a 2024-11-20T17:26:41,876 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/B of b3043adcc65f1dded05c4bfd9fcde44b into b9260e2fb066434496b7e68fd25a0e79(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:41,876 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/A of b3043adcc65f1dded05c4bfd9fcde44b into 245fd2601e8f421a8b93ee3df3b2242a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:41,876 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:41,876 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/B, priority=12, startTime=1732123601838; duration=0sec 2024-11-20T17:26:41,876 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:41,876 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/A, priority=12, startTime=1732123601838; duration=0sec 2024-11-20T17:26:41,876 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:41,876 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:B 2024-11-20T17:26:41,876 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:41,876 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:A 2024-11-20T17:26:41,876 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:26:41,877 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:26:41,877 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/C is initiating minor compaction (all files) 2024-11-20T17:26:41,878 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/C in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:41,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T17:26:41,878 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6185a8f6ead54668ae9d5c1aa8742059, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/5447b24835a94d36a1612f15050f8cf9, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/112230080a17451bab6f8093ec3ed0fc, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/733bab60f2fc4b80b4f51c9677a72f88] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=47.9 K 2024-11-20T17:26:41,878 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 6185a8f6ead54668ae9d5c1aa8742059, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732123596531 2024-11-20T17:26:41,879 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 5447b24835a94d36a1612f15050f8cf9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732123597178 2024-11-20T17:26:41,879 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 112230080a17451bab6f8093ec3ed0fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732123597810 2024-11-20T17:26:41,879 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 733bab60f2fc4b80b4f51c9677a72f88, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732123599966 2024-11-20T17:26:41,886 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#C#compaction#404 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:41,886 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/87a1a7b0ac8a4a0eb91b8b95fe6d9cec is 50, key is test_row_0/C:col10/1732123599966/Put/seqid=0 2024-11-20T17:26:41,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742303_1479 (size=12697) 2024-11-20T17:26:41,929 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:41,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-20T17:26:41,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:41,930 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:26:41,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:41,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:41,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:41,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:41,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:41,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:41,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/9c72932933e44cd7835e74d686dcfbda is 50, key is test_row_0/A:col10/1732123600599/Put/seqid=0 2024-11-20T17:26:41,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742304_1480 (size=12151) 2024-11-20T17:26:41,939 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=22.36 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/9c72932933e44cd7835e74d686dcfbda 2024-11-20T17:26:41,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/7bd3811241844de5893f9ce42b0a5441 is 50, key is test_row_0/B:col10/1732123600599/Put/seqid=0 2024-11-20T17:26:41,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742305_1481 (size=12151) 2024-11-20T17:26:42,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T17:26:42,294 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/87a1a7b0ac8a4a0eb91b8b95fe6d9cec as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/87a1a7b0ac8a4a0eb91b8b95fe6d9cec 2024-11-20T17:26:42,299 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/C of b3043adcc65f1dded05c4bfd9fcde44b into 87a1a7b0ac8a4a0eb91b8b95fe6d9cec(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:42,299 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:42,299 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/C, priority=12, startTime=1732123601838; duration=0sec 2024-11-20T17:26:42,299 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:42,299 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:C 2024-11-20T17:26:42,350 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/7bd3811241844de5893f9ce42b0a5441 2024-11-20T17:26:42,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/8dfcdb00dd2c45ac99bb5db78ddbc62e is 50, key is test_row_0/C:col10/1732123600599/Put/seqid=0 2024-11-20T17:26:42,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742306_1482 (size=12151) 2024-11-20T17:26:42,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T17:26:42,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:42,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
as already flushing 2024-11-20T17:26:42,763 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/8dfcdb00dd2c45ac99bb5db78ddbc62e 2024-11-20T17:26:42,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/9c72932933e44cd7835e74d686dcfbda as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/9c72932933e44cd7835e74d686dcfbda 2024-11-20T17:26:42,771 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/9c72932933e44cd7835e74d686dcfbda, entries=150, sequenceid=249, filesize=11.9 K 2024-11-20T17:26:42,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/7bd3811241844de5893f9ce42b0a5441 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/7bd3811241844de5893f9ce42b0a5441 2024-11-20T17:26:42,776 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/7bd3811241844de5893f9ce42b0a5441, entries=150, sequenceid=249, filesize=11.9 K 2024-11-20T17:26:42,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/8dfcdb00dd2c45ac99bb5db78ddbc62e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/8dfcdb00dd2c45ac99bb5db78ddbc62e 2024-11-20T17:26:42,779 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/8dfcdb00dd2c45ac99bb5db78ddbc62e, entries=150, sequenceid=249, filesize=11.9 K 2024-11-20T17:26:42,806 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=120.76 KB/123660 for b3043adcc65f1dded05c4bfd9fcde44b 
in 877ms, sequenceid=249, compaction requested=false 2024-11-20T17:26:42,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:42,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:42,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-20T17:26:42,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:42,807 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T17:26:42,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-20T17:26:42,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:42,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:42,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:42,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:42,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:42,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:42,809 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-20T17:26:42,809 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0300 sec 2024-11-20T17:26:42,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.0340 sec 2024-11-20T17:26:42,829 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/eaa94664ea734a969eb5d8581548a5fe is 50, key is test_row_0/A:col10/1732123602781/Put/seqid=0 2024-11-20T17:26:42,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742307_1483 (size=12301) 2024-11-20T17:26:42,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:42,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:42,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123662842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:42,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123662843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:42,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:42,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123662845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123662844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:42,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123662845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-20T17:26:42,880 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-20T17:26:42,881 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:42,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-20T17:26:42,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T17:26:42,883 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:42,884 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:42,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:42,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:42,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123662950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:42,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:42,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:42,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123662950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:42,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123662951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:42,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:42,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:42,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123662951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:42,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123662951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:42,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T17:26:43,035 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:43,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T17:26:43,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:43,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:43,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:43,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123663156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123663157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123663157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123663157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123663157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T17:26:43,190 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:43,190 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T17:26:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:43,190 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/eaa94664ea734a969eb5d8581548a5fe 2024-11-20T17:26:43,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/2d4465fcc52744339357eedf5653406c is 50, key is test_row_0/B:col10/1732123602781/Put/seqid=0 2024-11-20T17:26:43,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742308_1484 (size=12301) 2024-11-20T17:26:43,342 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:43,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T17:26:43,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:43,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
as already flushing 2024-11-20T17:26:43,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:43,343 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123663461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123663462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123663463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123663463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123663463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T17:26:43,495 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:43,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T17:26:43,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:43,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:43,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:43,495 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,644 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/2d4465fcc52744339357eedf5653406c 2024-11-20T17:26:43,647 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:43,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T17:26:43,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:43,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:43,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:43,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/1d299202023843269307f855d55c1b98 is 50, key is test_row_0/C:col10/1732123602781/Put/seqid=0 2024-11-20T17:26:43,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742309_1485 (size=12301) 2024-11-20T17:26:43,698 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T17:26:43,800 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:43,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T17:26:43,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:43,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:43,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:43,801 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,953 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:43,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T17:26:43,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:43,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:43,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:43,953 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:43,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123663965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123663967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123663969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123663970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:43,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123663970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:43,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T17:26:44,062 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/1d299202023843269307f855d55c1b98 2024-11-20T17:26:44,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/eaa94664ea734a969eb5d8581548a5fe as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/eaa94664ea734a969eb5d8581548a5fe 2024-11-20T17:26:44,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/eaa94664ea734a969eb5d8581548a5fe, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T17:26:44,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/2d4465fcc52744339357eedf5653406c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/2d4465fcc52744339357eedf5653406c 2024-11-20T17:26:44,072 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/2d4465fcc52744339357eedf5653406c, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T17:26:44,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/1d299202023843269307f855d55c1b98 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1d299202023843269307f855d55c1b98 2024-11-20T17:26:44,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1d299202023843269307f855d55c1b98, entries=150, sequenceid=274, filesize=12.0 K 2024-11-20T17:26:44,077 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for b3043adcc65f1dded05c4bfd9fcde44b in 1270ms, sequenceid=274, compaction requested=true 2024-11-20T17:26:44,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:44,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:44,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:44,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:44,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:44,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:44,077 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:44,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:44,077 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:44,078 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:44,078 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:44,078 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/A is initiating minor compaction (all files) 2024-11-20T17:26:44,078 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/B is initiating minor compaction (all files) 2024-11-20T17:26:44,078 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/A in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:44,078 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/B in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:44,079 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/245fd2601e8f421a8b93ee3df3b2242a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/9c72932933e44cd7835e74d686dcfbda, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/eaa94664ea734a969eb5d8581548a5fe] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=36.3 K 2024-11-20T17:26:44,079 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b9260e2fb066434496b7e68fd25a0e79, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/7bd3811241844de5893f9ce42b0a5441, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/2d4465fcc52744339357eedf5653406c] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=36.3 K 2024-11-20T17:26:44,079 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 245fd2601e8f421a8b93ee3df3b2242a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732123599966 2024-11-20T17:26:44,079 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting b9260e2fb066434496b7e68fd25a0e79, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732123599966 2024-11-20T17:26:44,079 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bd3811241844de5893f9ce42b0a5441, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732123600591 
2024-11-20T17:26:44,079 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c72932933e44cd7835e74d686dcfbda, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732123600591 2024-11-20T17:26:44,080 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d4465fcc52744339357eedf5653406c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732123602776 2024-11-20T17:26:44,080 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting eaa94664ea734a969eb5d8581548a5fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732123602776 2024-11-20T17:26:44,087 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#B#compaction#411 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:44,088 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/285552750fef44ca84a0073f5ec8b778 is 50, key is test_row_0/B:col10/1732123602781/Put/seqid=0 2024-11-20T17:26:44,099 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#A#compaction#412 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:44,099 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/f6f8cd4a41594f62a152ac24b4634f35 is 50, key is test_row_0/A:col10/1732123602781/Put/seqid=0 2024-11-20T17:26:44,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742310_1486 (size=12949) 2024-11-20T17:26:44,105 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:44,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T17:26:44,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:44,106 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:26:44,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:44,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:44,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:44,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:44,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:44,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:44,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/492359fe81d1443ab561816ddab5fcfe is 50, key is test_row_0/A:col10/1732123602844/Put/seqid=0 2024-11-20T17:26:44,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742311_1487 (size=12949) 2024-11-20T17:26:44,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742312_1488 (size=12301) 2024-11-20T17:26:44,506 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/285552750fef44ca84a0073f5ec8b778 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/285552750fef44ca84a0073f5ec8b778 2024-11-20T17:26:44,510 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/B of b3043adcc65f1dded05c4bfd9fcde44b into 285552750fef44ca84a0073f5ec8b778(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:44,510 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:44,510 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/B, priority=13, startTime=1732123604077; duration=0sec 2024-11-20T17:26:44,511 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:44,511 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:B 2024-11-20T17:26:44,511 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:44,512 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:44,512 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/C is initiating minor compaction (all files) 2024-11-20T17:26:44,512 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/C in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:44,512 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/87a1a7b0ac8a4a0eb91b8b95fe6d9cec, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/8dfcdb00dd2c45ac99bb5db78ddbc62e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1d299202023843269307f855d55c1b98] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=36.3 K 2024-11-20T17:26:44,512 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 87a1a7b0ac8a4a0eb91b8b95fe6d9cec, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732123599966 2024-11-20T17:26:44,513 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 8dfcdb00dd2c45ac99bb5db78ddbc62e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732123600591 2024-11-20T17:26:44,513 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d299202023843269307f855d55c1b98, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732123602776 2024-11-20T17:26:44,519 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
b3043adcc65f1dded05c4bfd9fcde44b#C#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:44,519 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/fd62b1e4a8214d5c8a8be23b000a47e3 is 50, key is test_row_0/C:col10/1732123602781/Put/seqid=0 2024-11-20T17:26:44,524 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/f6f8cd4a41594f62a152ac24b4634f35 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f6f8cd4a41594f62a152ac24b4634f35 2024-11-20T17:26:44,525 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/492359fe81d1443ab561816ddab5fcfe 2024-11-20T17:26:44,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742313_1489 (size=12949) 2024-11-20T17:26:44,530 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/A of b3043adcc65f1dded05c4bfd9fcde44b into f6f8cd4a41594f62a152ac24b4634f35(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:44,530 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:44,530 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/A, priority=13, startTime=1732123604077; duration=0sec 2024-11-20T17:26:44,531 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:44,531 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:A 2024-11-20T17:26:44,532 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/fd62b1e4a8214d5c8a8be23b000a47e3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/fd62b1e4a8214d5c8a8be23b000a47e3 2024-11-20T17:26:44,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/fd01b24bc5c749dda33b185c9124fa4d is 50, key is test_row_0/B:col10/1732123602844/Put/seqid=0 2024-11-20T17:26:44,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742314_1490 (size=12301) 2024-11-20T17:26:44,539 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/C of b3043adcc65f1dded05c4bfd9fcde44b into fd62b1e4a8214d5c8a8be23b000a47e3(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:44,539 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:44,539 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/C, priority=13, startTime=1732123604077; duration=0sec 2024-11-20T17:26:44,539 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:44,539 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:C 2024-11-20T17:26:44,539 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/fd01b24bc5c749dda33b185c9124fa4d 2024-11-20T17:26:44,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/c52bcf6d9b08494497f2a9c80d9d56b7 is 50, key is test_row_0/C:col10/1732123602844/Put/seqid=0 2024-11-20T17:26:44,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742315_1491 (size=12301) 2024-11-20T17:26:44,954 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/c52bcf6d9b08494497f2a9c80d9d56b7 2024-11-20T17:26:44,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/492359fe81d1443ab561816ddab5fcfe as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/492359fe81d1443ab561816ddab5fcfe 2024-11-20T17:26:44,961 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/492359fe81d1443ab561816ddab5fcfe, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T17:26:44,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/fd01b24bc5c749dda33b185c9124fa4d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/fd01b24bc5c749dda33b185c9124fa4d 2024-11-20T17:26:44,965 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/fd01b24bc5c749dda33b185c9124fa4d, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T17:26:44,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/c52bcf6d9b08494497f2a9c80d9d56b7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/c52bcf6d9b08494497f2a9c80d9d56b7 2024-11-20T17:26:44,969 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/c52bcf6d9b08494497f2a9c80d9d56b7, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T17:26:44,970 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for b3043adcc65f1dded05c4bfd9fcde44b in 864ms, sequenceid=287, compaction requested=false 2024-11-20T17:26:44,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:44,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:44,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-20T17:26:44,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-20T17:26:44,976 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-20T17:26:44,976 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0870 sec 2024-11-20T17:26:44,977 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.0960 sec 2024-11-20T17:26:44,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T17:26:44,986 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-20T17:26:44,987 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:44,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-20T17:26:44,988 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:44,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T17:26:44,988 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:44,988 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:45,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:45,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:26:45,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:45,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:45,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:45,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-20T17:26:45,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:45,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:45,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/da90095b25f64be0b6e7c13814f99c55 is 50, key is test_row_0/A:col10/1732123605011/Put/seqid=0 2024-11-20T17:26:45,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742316_1492 (size=14741) 2024-11-20T17:26:45,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123665034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123665036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123665036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123665039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123665040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T17:26:45,139 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:45,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:26:45,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:45,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:45,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123665141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:45,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123665141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123665141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123665145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123665145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T17:26:45,293 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:45,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:26:45,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:45,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123665342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123665343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123665343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123665347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123665348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,426 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/da90095b25f64be0b6e7c13814f99c55 2024-11-20T17:26:45,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/f316b727511343839f7b135ed102755e is 50, key is test_row_0/B:col10/1732123605011/Put/seqid=0 2024-11-20T17:26:45,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742317_1493 (size=12301) 2024-11-20T17:26:45,445 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:45,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:26:45,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:45,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,446 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T17:26:45,598 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:45,598 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:26:45,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:45,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,598 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123665650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123665651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123665651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123665653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:45,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123665655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:45,750 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:45,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:26:45,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:45,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,751 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:45,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/f316b727511343839f7b135ed102755e 2024-11-20T17:26:45,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/4f4ce2b62d3e408ab775ffb7fb0923ed is 50, key is test_row_0/C:col10/1732123605011/Put/seqid=0 2024-11-20T17:26:45,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742318_1494 (size=12301) 2024-11-20T17:26:45,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/4f4ce2b62d3e408ab775ffb7fb0923ed 2024-11-20T17:26:45,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/da90095b25f64be0b6e7c13814f99c55 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/da90095b25f64be0b6e7c13814f99c55 2024-11-20T17:26:45,852 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/da90095b25f64be0b6e7c13814f99c55, entries=200, sequenceid=301, filesize=14.4 K 2024-11-20T17:26:45,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/f316b727511343839f7b135ed102755e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/f316b727511343839f7b135ed102755e 2024-11-20T17:26:45,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/f316b727511343839f7b135ed102755e, entries=150, sequenceid=301, filesize=12.0 K 2024-11-20T17:26:45,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/4f4ce2b62d3e408ab775ffb7fb0923ed as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/4f4ce2b62d3e408ab775ffb7fb0923ed 2024-11-20T17:26:45,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/4f4ce2b62d3e408ab775ffb7fb0923ed, entries=150, sequenceid=301, filesize=12.0 K 2024-11-20T17:26:45,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b3043adcc65f1dded05c4bfd9fcde44b in 847ms, sequenceid=301, compaction requested=true 2024-11-20T17:26:45,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:45,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:45,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:45,860 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:45,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:45,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:45,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:45,861 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T17:26:45,861 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:45,861 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39991 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:45,862 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:45,862 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/A is initiating minor compaction (all files) 2024-11-20T17:26:45,862 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/B is initiating minor compaction (all files) 2024-11-20T17:26:45,862 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/A in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,862 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/B in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,862 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f6f8cd4a41594f62a152ac24b4634f35, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/492359fe81d1443ab561816ddab5fcfe, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/da90095b25f64be0b6e7c13814f99c55] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=39.1 K 2024-11-20T17:26:45,862 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/285552750fef44ca84a0073f5ec8b778, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/fd01b24bc5c749dda33b185c9124fa4d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/f316b727511343839f7b135ed102755e] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=36.7 K 2024-11-20T17:26:45,862 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting f6f8cd4a41594f62a152ac24b4634f35, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732123602776 2024-11-20T17:26:45,862 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 285552750fef44ca84a0073f5ec8b778, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732123602776 2024-11-20T17:26:45,862 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 492359fe81d1443ab561816ddab5fcfe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732123602841 2024-11-20T17:26:45,862 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting fd01b24bc5c749dda33b185c9124fa4d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732123602841 2024-11-20T17:26:45,863 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting da90095b25f64be0b6e7c13814f99c55, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732123604978 2024-11-20T17:26:45,863 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting f316b727511343839f7b135ed102755e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732123604978 2024-11-20T17:26:45,870 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#A#compaction#420 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:45,871 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/fe8adef036244d1e839798324bf8ec45 is 50, key is test_row_0/A:col10/1732123605011/Put/seqid=0 2024-11-20T17:26:45,873 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#B#compaction#421 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:45,874 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/4f188159d10845798cf01172823f4ceb is 50, key is test_row_0/B:col10/1732123605011/Put/seqid=0 2024-11-20T17:26:45,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742319_1495 (size=13051) 2024-11-20T17:26:45,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742320_1496 (size=13051) 2024-11-20T17:26:45,903 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:45,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T17:26:45,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:45,904 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:26:45,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:45,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:45,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:45,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:45,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:45,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:45,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/0809926c8a9b433bb81b8198b1ee265f is 50, key is test_row_0/A:col10/1732123605033/Put/seqid=0 2024-11-20T17:26:45,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742321_1497 
(size=12301) 2024-11-20T17:26:46,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T17:26:46,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:46,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:46,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123666162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123666162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123666164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123666165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123666166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123666268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123666268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123666268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123666270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123666271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,287 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/4f188159d10845798cf01172823f4ceb as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/4f188159d10845798cf01172823f4ceb 2024-11-20T17:26:46,287 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/fe8adef036244d1e839798324bf8ec45 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/fe8adef036244d1e839798324bf8ec45 2024-11-20T17:26:46,291 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/A of b3043adcc65f1dded05c4bfd9fcde44b into fe8adef036244d1e839798324bf8ec45(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:46,291 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:46,291 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/A, priority=13, startTime=1732123605860; duration=0sec 2024-11-20T17:26:46,292 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:46,292 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:A 2024-11-20T17:26:46,292 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/B of b3043adcc65f1dded05c4bfd9fcde44b into 4f188159d10845798cf01172823f4ceb(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:46,292 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:46,292 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:46,292 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/B, priority=13, startTime=1732123605860; duration=0sec 2024-11-20T17:26:46,292 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:46,292 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:B 2024-11-20T17:26:46,292 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:46,293 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/C is initiating minor compaction (all files) 2024-11-20T17:26:46,293 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/C in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:46,293 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/fd62b1e4a8214d5c8a8be23b000a47e3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/c52bcf6d9b08494497f2a9c80d9d56b7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/4f4ce2b62d3e408ab775ffb7fb0923ed] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=36.7 K 2024-11-20T17:26:46,293 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd62b1e4a8214d5c8a8be23b000a47e3, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732123602776 2024-11-20T17:26:46,293 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting c52bcf6d9b08494497f2a9c80d9d56b7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732123602841 2024-11-20T17:26:46,294 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f4ce2b62d3e408ab775ffb7fb0923ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732123604978 2024-11-20T17:26:46,300 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#C#compaction#423 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:46,301 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/a0d46284b70447528b353fc90016dda1 is 50, key is test_row_0/C:col10/1732123605011/Put/seqid=0 2024-11-20T17:26:46,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742322_1498 (size=13051) 2024-11-20T17:26:46,313 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/0809926c8a9b433bb81b8198b1ee265f 2024-11-20T17:26:46,314 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/a0d46284b70447528b353fc90016dda1 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/a0d46284b70447528b353fc90016dda1 2024-11-20T17:26:46,327 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/C of b3043adcc65f1dded05c4bfd9fcde44b into a0d46284b70447528b353fc90016dda1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
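The PressureAwareThroughputController entry above reports the compaction's average throughput against a total limit of 50.00 MB/second, sleeping writers that get ahead of the budget ("slept 0 time(s)" here because the compaction was small). A minimal sketch of that style of throttling follows; the class and method names are invented for illustration and this is not the HBase controller.

```java
// Minimal throughput-throttling sketch: sleep whenever the bytes written so far
// exceed what the configured rate allows for the elapsed time.
// Illustrative only; not the HBase PressureAwareThroughputController.
public class ThroughputThrottleSketch {
    private final double maxBytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten = 0;

    public ThroughputThrottleSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    /** Call after writing a chunk; sleeps if we are ahead of the allowed rate. */
    public void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        double allowedSeconds = bytesWritten / maxBytesPerSecond;
        long sleepMillis = (long) ((allowedSeconds - elapsedSeconds) * 1000);
        if (sleepMillis > 0) {
            Thread.sleep(sleepMillis);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/s, matching the "total limit is 50.00 MB/second" log line.
        ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50.0 * 1024 * 1024);
        for (int i = 0; i < 10; i++) {
            // Pretend we wrote a 1 MB chunk of compacted data.
            throttle.control(1024 * 1024);
        }
        System.out.println("wrote 10 MB under the throttle");
    }
}
```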
2024-11-20T17:26:46,327 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:46,327 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/C, priority=13, startTime=1732123605861; duration=0sec 2024-11-20T17:26:46,328 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:46,328 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:C 2024-11-20T17:26:46,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/254efa28fe3347c5b2db84b25acb8ab9 is 50, key is test_row_0/B:col10/1732123605033/Put/seqid=0 2024-11-20T17:26:46,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742323_1499 (size=12301) 2024-11-20T17:26:46,336 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/254efa28fe3347c5b2db84b25acb8ab9 2024-11-20T17:26:46,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/bd3eeac1ed8d4231bdcc4ce5f0aaedfc is 50, key is test_row_0/C:col10/1732123605033/Put/seqid=0 2024-11-20T17:26:46,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742324_1500 (size=12301) 2024-11-20T17:26:46,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123666473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123666474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123666474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123666474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,480 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123666476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,755 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/bd3eeac1ed8d4231bdcc4ce5f0aaedfc 2024-11-20T17:26:46,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/0809926c8a9b433bb81b8198b1ee265f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0809926c8a9b433bb81b8198b1ee265f 2024-11-20T17:26:46,762 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0809926c8a9b433bb81b8198b1ee265f, entries=150, sequenceid=326, filesize=12.0 K 2024-11-20T17:26:46,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/254efa28fe3347c5b2db84b25acb8ab9 as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/254efa28fe3347c5b2db84b25acb8ab9 2024-11-20T17:26:46,766 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/254efa28fe3347c5b2db84b25acb8ab9, entries=150, sequenceid=326, filesize=12.0 K 2024-11-20T17:26:46,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/bd3eeac1ed8d4231bdcc4ce5f0aaedfc as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/bd3eeac1ed8d4231bdcc4ce5f0aaedfc 2024-11-20T17:26:46,770 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/bd3eeac1ed8d4231bdcc4ce5f0aaedfc, entries=150, sequenceid=326, filesize=12.0 K 2024-11-20T17:26:46,770 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b3043adcc65f1dded05c4bfd9fcde44b in 866ms, sequenceid=326, compaction requested=false 2024-11-20T17:26:46,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:46,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
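The RegionTooBusyException warnings in this stretch of the log are back-pressure: while the region's memstore is over the test's 512.0 K limit, puts are rejected and callers are expected to retry once the in-flight flush frees space. A minimal client-side sketch of such a retry is shown below, reusing the table, row and qualifier names that appear in the log; the column family choice, retry count and backoff values are assumptions, and the stock HBase client already performs similar retries internally.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a client that backs off and retries when a put is rejected with
// RegionTooBusyException (memstore over limit). Retry count and backoff are
// illustrative values, not HBase defaults.
public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;                 // illustrative starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;                        // write accepted
                } catch (RegionTooBusyException e) {
                    // The region rejected the write because its memstore is over
                    // the configured limit; wait for the flush, then retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
```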
2024-11-20T17:26:46,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-20T17:26:46,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-20T17:26:46,773 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-20T17:26:46,773 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7830 sec 2024-11-20T17:26:46,774 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.7860 sec 2024-11-20T17:26:46,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:46,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:26:46,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:46,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:46,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:46,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:46,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:46,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:46,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/e9016b9628274798bd6825c6feefbdd0 is 50, key is test_row_0/A:col10/1732123606778/Put/seqid=0 2024-11-20T17:26:46,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742325_1501 (size=14741) 2024-11-20T17:26:46,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123666802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123666806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123666807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123666807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123666807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123666908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123666908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123666912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123666912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:46,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:46,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123666913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T17:26:47,091 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-20T17:26:47,093 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:47,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-20T17:26:47,094 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:47,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:26:47,095 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:47,095 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:47,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123667114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123667114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123667117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123667117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123667117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,189 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/e9016b9628274798bd6825c6feefbdd0 2024-11-20T17:26:47,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:26:47,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/35f153d2b30e4f0aa4914bae98263ded is 50, key is test_row_0/B:col10/1732123606778/Put/seqid=0 2024-11-20T17:26:47,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742326_1502 (size=12301) 2024-11-20T17:26:47,246 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:47,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:26:47,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:47,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:47,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
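The flush request above ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") is stored as FlushTableProcedure pid=134 with a FlushRegionProcedure subprocedure pid=135, which the region server immediately declines because the region is already flushing. From the client's side this all sits behind a single Admin.flush call, and the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed" line suggests the caller waits for the procedure to finish. A minimal sketch of issuing such a flush follows; it assumes a reachable cluster and reuses the table name from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of requesting a table flush through the Admin API. On the master this
// shows up as a FlushTableProcedure with per-region FlushRegionProcedure
// subprocedures, as in the pid=132/133 and pid=134/135 entries above.
public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Blocks the caller until the flush operation is reported complete,
            // as the "Operation: FLUSH ... completed" log line above suggests.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```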
2024-11-20T17:26:47,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:26:47,399 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:47,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:26:47,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:47,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:47,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:47,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123667420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123667421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123667422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123667423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123667423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,551 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:47,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:26:47,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:47,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:47,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:47,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/35f153d2b30e4f0aa4914bae98263ded 2024-11-20T17:26:47,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/6048b2e3873b47d68a829e1276242dd4 is 50, key is test_row_0/C:col10/1732123606778/Put/seqid=0 2024-11-20T17:26:47,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742327_1503 (size=12301) 2024-11-20T17:26:47,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:26:47,704 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:47,704 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:26:47,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:47,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
as already flushing 2024-11-20T17:26:47,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:47,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,856 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:47,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:26:47,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:47,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:47,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:47,857 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:47,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34250 deadline: 1732123667926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34276 deadline: 1732123667926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34258 deadline: 1732123667927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34228 deadline: 1732123667927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:47,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:47,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34272 deadline: 1732123667928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:48,008 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:48,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:26:48,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:48,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. as already flushing 2024-11-20T17:26:48,009 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/6048b2e3873b47d68a829e1276242dd4 2024-11-20T17:26:48,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:48,009 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:48,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:48,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:48,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/e9016b9628274798bd6825c6feefbdd0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/e9016b9628274798bd6825c6feefbdd0 2024-11-20T17:26:48,016 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/e9016b9628274798bd6825c6feefbdd0, entries=200, sequenceid=341, filesize=14.4 K 2024-11-20T17:26:48,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/35f153d2b30e4f0aa4914bae98263ded as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/35f153d2b30e4f0aa4914bae98263ded 2024-11-20T17:26:48,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/35f153d2b30e4f0aa4914bae98263ded, entries=150, sequenceid=341, filesize=12.0 K 2024-11-20T17:26:48,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/6048b2e3873b47d68a829e1276242dd4 as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6048b2e3873b47d68a829e1276242dd4 2024-11-20T17:26:48,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6048b2e3873b47d68a829e1276242dd4, entries=150, sequenceid=341, filesize=12.0 K 2024-11-20T17:26:48,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for b3043adcc65f1dded05c4bfd9fcde44b in 1243ms, sequenceid=341, compaction requested=true 2024-11-20T17:26:48,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:48,024 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:48,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:26:48,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:48,024 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:48,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:26:48,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:48,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b3043adcc65f1dded05c4bfd9fcde44b:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:26:48,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:48,025 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:48,025 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:48,025 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/A is initiating minor compaction (all files) 2024-11-20T17:26:48,025 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/B is initiating minor compaction (all files) 2024-11-20T17:26:48,025 INFO 
[RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/A in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:48,025 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/B in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:48,025 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/fe8adef036244d1e839798324bf8ec45, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0809926c8a9b433bb81b8198b1ee265f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/e9016b9628274798bd6825c6feefbdd0] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=39.2 K 2024-11-20T17:26:48,025 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/4f188159d10845798cf01172823f4ceb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/254efa28fe3347c5b2db84b25acb8ab9, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/35f153d2b30e4f0aa4914bae98263ded] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=36.8 K 2024-11-20T17:26:48,025 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe8adef036244d1e839798324bf8ec45, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732123604978 2024-11-20T17:26:48,025 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f188159d10845798cf01172823f4ceb, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732123604978 2024-11-20T17:26:48,025 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0809926c8a9b433bb81b8198b1ee265f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732123605033 2024-11-20T17:26:48,026 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 254efa28fe3347c5b2db84b25acb8ab9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732123605033 2024-11-20T17:26:48,026 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9016b9628274798bd6825c6feefbdd0, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732123606160 2024-11-20T17:26:48,026 DEBUG 
[RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 35f153d2b30e4f0aa4914bae98263ded, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732123606160 2024-11-20T17:26:48,031 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#A#compaction#429 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:48,032 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/f7cdc06c31f5432b83c13dcb5880dae4 is 50, key is test_row_0/A:col10/1732123606778/Put/seqid=0 2024-11-20T17:26:48,032 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#B#compaction#430 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:48,033 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/dd26b8a430d041b1941056fea8fc1c8a is 50, key is test_row_0/B:col10/1732123606778/Put/seqid=0 2024-11-20T17:26:48,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742329_1505 (size=13153) 2024-11-20T17:26:48,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742328_1504 (size=13153) 2024-11-20T17:26:48,048 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/dd26b8a430d041b1941056fea8fc1c8a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/dd26b8a430d041b1941056fea8fc1c8a 2024-11-20T17:26:48,048 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/f7cdc06c31f5432b83c13dcb5880dae4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f7cdc06c31f5432b83c13dcb5880dae4 2024-11-20T17:26:48,052 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/B of b3043adcc65f1dded05c4bfd9fcde44b into dd26b8a430d041b1941056fea8fc1c8a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:26:48,052 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:48,052 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/B, priority=13, startTime=1732123608024; duration=0sec 2024-11-20T17:26:48,052 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:26:48,052 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:B 2024-11-20T17:26:48,052 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:26:48,053 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/A of b3043adcc65f1dded05c4bfd9fcde44b into f7cdc06c31f5432b83c13dcb5880dae4(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:48,053 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:48,053 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/A, priority=13, startTime=1732123608023; duration=0sec 2024-11-20T17:26:48,053 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:48,053 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:A 2024-11-20T17:26:48,053 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:26:48,053 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): b3043adcc65f1dded05c4bfd9fcde44b/C is initiating minor compaction (all files) 2024-11-20T17:26:48,053 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b3043adcc65f1dded05c4bfd9fcde44b/C in TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:48,053 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/a0d46284b70447528b353fc90016dda1, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/bd3eeac1ed8d4231bdcc4ce5f0aaedfc, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6048b2e3873b47d68a829e1276242dd4] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp, totalSize=36.8 K 2024-11-20T17:26:48,054 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting a0d46284b70447528b353fc90016dda1, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732123604978 2024-11-20T17:26:48,054 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting bd3eeac1ed8d4231bdcc4ce5f0aaedfc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1732123605033 2024-11-20T17:26:48,054 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 6048b2e3873b47d68a829e1276242dd4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1732123606160 2024-11-20T17:26:48,060 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b3043adcc65f1dded05c4bfd9fcde44b#C#compaction#431 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:26:48,061 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/f1dbdb6e77b04f5fb44cfe2e22ef328a is 50, key is test_row_0/C:col10/1732123606778/Put/seqid=0 2024-11-20T17:26:48,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742330_1506 (size=13153) 2024-11-20T17:26:48,161 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:48,162 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T17:26:48,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:48,162 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:26:48,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:48,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:48,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:48,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:48,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:48,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:48,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/0fa727273ffb45c1a13c1e80a18086f2 is 50, key is test_row_0/A:col10/1732123606805/Put/seqid=0 2024-11-20T17:26:48,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742331_1507 (size=12301) 2024-11-20T17:26:48,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:26:48,257 DEBUG [Thread-1935 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x567011a8 to 127.0.0.1:56028 2024-11-20T17:26:48,257 DEBUG [Thread-1935 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:48,258 DEBUG [Thread-1933 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47ef9951 to 127.0.0.1:56028 2024-11-20T17:26:48,258 DEBUG [Thread-1933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:48,259 DEBUG [Thread-1939 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d567fc2 to 127.0.0.1:56028 2024-11-20T17:26:48,259 DEBUG [Thread-1939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:48,261 DEBUG [Thread-1931 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40302925 to 127.0.0.1:56028 2024-11-20T17:26:48,261 DEBUG [Thread-1931 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:48,261 DEBUG [Thread-1937 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x02430fee to 127.0.0.1:56028 2024-11-20T17:26:48,261 DEBUG [Thread-1937 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:48,470 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/f1dbdb6e77b04f5fb44cfe2e22ef328a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/f1dbdb6e77b04f5fb44cfe2e22ef328a 2024-11-20T17:26:48,473 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b3043adcc65f1dded05c4bfd9fcde44b/C of b3043adcc65f1dded05c4bfd9fcde44b into f1dbdb6e77b04f5fb44cfe2e22ef328a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:26:48,473 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:48,473 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b., storeName=b3043adcc65f1dded05c4bfd9fcde44b/C, priority=13, startTime=1732123608024; duration=0sec 2024-11-20T17:26:48,474 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:26:48,474 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b3043adcc65f1dded05c4bfd9fcde44b:C 2024-11-20T17:26:48,570 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/0fa727273ffb45c1a13c1e80a18086f2 2024-11-20T17:26:48,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/c5504f4323fe4b369f71284f88896c7e is 50, key is test_row_0/B:col10/1732123606805/Put/seqid=0 2024-11-20T17:26:48,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742332_1508 (size=12301) 2024-11-20T17:26:48,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:48,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
as already flushing 2024-11-20T17:26:48,934 DEBUG [Thread-1928 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x402e5def to 127.0.0.1:56028 2024-11-20T17:26:48,934 DEBUG [Thread-1928 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:48,935 DEBUG [Thread-1926 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x39b3baa5 to 127.0.0.1:56028 2024-11-20T17:26:48,935 DEBUG [Thread-1926 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:48,938 DEBUG [Thread-1922 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53fc02ba to 127.0.0.1:56028 2024-11-20T17:26:48,938 DEBUG [Thread-1922 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:48,938 DEBUG [Thread-1924 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2011d733 to 127.0.0.1:56028 2024-11-20T17:26:48,938 DEBUG [Thread-1924 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:48,940 DEBUG [Thread-1920 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65e17c26 to 127.0.0.1:56028 2024-11-20T17:26:48,940 DEBUG [Thread-1920 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:48,979 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/c5504f4323fe4b369f71284f88896c7e 2024-11-20T17:26:48,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/5bf2320c915a4fdcabc9ad64c7633651 is 50, key is test_row_0/C:col10/1732123606805/Put/seqid=0 2024-11-20T17:26:48,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742333_1509 (size=12301) 2024-11-20T17:26:49,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:26:49,388 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/5bf2320c915a4fdcabc9ad64c7633651 2024-11-20T17:26:49,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/0fa727273ffb45c1a13c1e80a18086f2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0fa727273ffb45c1a13c1e80a18086f2 2024-11-20T17:26:49,394 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0fa727273ffb45c1a13c1e80a18086f2, entries=150, sequenceid=368, filesize=12.0 K 2024-11-20T17:26:49,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/c5504f4323fe4b369f71284f88896c7e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/c5504f4323fe4b369f71284f88896c7e 2024-11-20T17:26:49,397 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/c5504f4323fe4b369f71284f88896c7e, entries=150, sequenceid=368, filesize=12.0 K 2024-11-20T17:26:49,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/5bf2320c915a4fdcabc9ad64c7633651 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/5bf2320c915a4fdcabc9ad64c7633651 2024-11-20T17:26:49,400 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/5bf2320c915a4fdcabc9ad64c7633651, entries=150, sequenceid=368, filesize=12.0 K 2024-11-20T17:26:49,400 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=33.54 KB/34350 for b3043adcc65f1dded05c4bfd9fcde44b in 1238ms, sequenceid=368, compaction requested=false 2024-11-20T17:26:49,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:49,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 
2024-11-20T17:26:49,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-20T17:26:49,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-20T17:26:49,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-20T17:26:49,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3060 sec 2024-11-20T17:26:49,403 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 2.3090 sec 2024-11-20T17:26:51,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T17:26:51,198 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2965 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8895 rows 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2977 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8931 rows 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2968 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8904 rows 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2968 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8904 rows 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2962 2024-11-20T17:26:51,199 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8886 rows 2024-11-20T17:26:51,199 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:26:51,199 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38dd8644 to 127.0.0.1:56028 2024-11-20T17:26:51,199 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:26:51,202 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of 
TestAcidGuarantees 2024-11-20T17:26:51,203 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T17:26:51,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:51,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:26:51,206 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123611206"}]},"ts":"1732123611206"} 2024-11-20T17:26:51,207 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T17:26:51,209 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T17:26:51,209 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:26:51,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b3043adcc65f1dded05c4bfd9fcde44b, UNASSIGN}] 2024-11-20T17:26:51,211 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=137, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b3043adcc65f1dded05c4bfd9fcde44b, UNASSIGN 2024-11-20T17:26:51,211 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=b3043adcc65f1dded05c4bfd9fcde44b, regionState=CLOSING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:26:51,212 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:26:51,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; CloseRegionProcedure b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:26:51,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:26:51,363 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:51,363 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] handler.UnassignRegionHandler(124): Close b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:51,363 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:26:51,363 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1681): Closing b3043adcc65f1dded05c4bfd9fcde44b, disabling compactions & flushes 2024-11-20T17:26:51,363 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] 
regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:51,363 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:51,363 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. after waiting 0 ms 2024-11-20T17:26:51,363 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:51,364 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(2837): Flushing b3043adcc65f1dded05c4bfd9fcde44b 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T17:26:51,364 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=A 2024-11-20T17:26:51,364 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:51,364 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=B 2024-11-20T17:26:51,364 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:51,364 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b3043adcc65f1dded05c4bfd9fcde44b, store=C 2024-11-20T17:26:51,364 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:51,367 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/1a0f0ddfed584b5fbb3edf3856c284d3 is 50, key is test_row_0/A:col10/1732123608937/Put/seqid=0 2024-11-20T17:26:51,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742334_1510 (size=12301) 2024-11-20T17:26:51,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:26:51,771 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=377 (bloomFilter=true), 
to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/1a0f0ddfed584b5fbb3edf3856c284d3 2024-11-20T17:26:51,776 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/465439fb668c4d058a6abc8a02f1e3c5 is 50, key is test_row_0/B:col10/1732123608937/Put/seqid=0 2024-11-20T17:26:51,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742335_1511 (size=12301) 2024-11-20T17:26:51,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:26:52,180 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/465439fb668c4d058a6abc8a02f1e3c5 2024-11-20T17:26:52,185 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/6635f94e4c734f77b4667735873b91b0 is 50, key is test_row_0/C:col10/1732123608937/Put/seqid=0 2024-11-20T17:26:52,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742336_1512 (size=12301) 2024-11-20T17:26:52,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:26:52,590 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/6635f94e4c734f77b4667735873b91b0 2024-11-20T17:26:52,594 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/A/1a0f0ddfed584b5fbb3edf3856c284d3 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/1a0f0ddfed584b5fbb3edf3856c284d3 2024-11-20T17:26:52,596 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/1a0f0ddfed584b5fbb3edf3856c284d3, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T17:26:52,597 DEBUG 
[RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/B/465439fb668c4d058a6abc8a02f1e3c5 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/465439fb668c4d058a6abc8a02f1e3c5 2024-11-20T17:26:52,599 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/465439fb668c4d058a6abc8a02f1e3c5, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T17:26:52,600 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/.tmp/C/6635f94e4c734f77b4667735873b91b0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6635f94e4c734f77b4667735873b91b0 2024-11-20T17:26:52,602 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6635f94e4c734f77b4667735873b91b0, entries=150, sequenceid=377, filesize=12.0 K 2024-11-20T17:26:52,603 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for b3043adcc65f1dded05c4bfd9fcde44b in 1240ms, sequenceid=377, compaction requested=true 2024-11-20T17:26:52,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/bad865775069413696e5a7f3de1f6afb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/e48bc44672f74b898803e43fd1b59d62, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/643ac2ccae094a5d80ad7eed094b0252, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0d804aa774bb463a9673970bda8bf64a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/acaadd3292e94a7ca7e426f580fc58a0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/59713f3970ac425cb58d98cd485c2f8a, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/1189d10b14944ec5846ec1ac4422bb07, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/9e71f89512314bb3a9e3d59d2c2941da, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/28d1d18f3ef84baf85aac468f848aa08, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/6a279dbd22c54096ba99ce21422e5f88, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/29868b73f0134f569e6868dc13f7c17a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/3448538a381540faa3f09f4d884fb5a2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/73ad2385950c492c948fa46d263b381e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f083441f383b4df983b5265b77bcea82, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/64974ff8aefb499cbd48f78578fd16b3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/67552e37d35d42df8103b5e06fa40f8e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/245fd2601e8f421a8b93ee3df3b2242a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/9c72932933e44cd7835e74d686dcfbda, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f6f8cd4a41594f62a152ac24b4634f35, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/eaa94664ea734a969eb5d8581548a5fe, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/492359fe81d1443ab561816ddab5fcfe, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/da90095b25f64be0b6e7c13814f99c55, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/fe8adef036244d1e839798324bf8ec45, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0809926c8a9b433bb81b8198b1ee265f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/e9016b9628274798bd6825c6feefbdd0] to archive 2024-11-20T17:26:52,604 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:26:52,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/bad865775069413696e5a7f3de1f6afb to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/bad865775069413696e5a7f3de1f6afb 2024-11-20T17:26:52,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/e48bc44672f74b898803e43fd1b59d62 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/e48bc44672f74b898803e43fd1b59d62 2024-11-20T17:26:52,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/643ac2ccae094a5d80ad7eed094b0252 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/643ac2ccae094a5d80ad7eed094b0252 2024-11-20T17:26:52,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0d804aa774bb463a9673970bda8bf64a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0d804aa774bb463a9673970bda8bf64a 2024-11-20T17:26:52,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/acaadd3292e94a7ca7e426f580fc58a0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/acaadd3292e94a7ca7e426f580fc58a0 2024-11-20T17:26:52,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/59713f3970ac425cb58d98cd485c2f8a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/59713f3970ac425cb58d98cd485c2f8a 2024-11-20T17:26:52,610 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/1189d10b14944ec5846ec1ac4422bb07 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/1189d10b14944ec5846ec1ac4422bb07 2024-11-20T17:26:52,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/9e71f89512314bb3a9e3d59d2c2941da to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/9e71f89512314bb3a9e3d59d2c2941da 2024-11-20T17:26:52,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/28d1d18f3ef84baf85aac468f848aa08 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/28d1d18f3ef84baf85aac468f848aa08 2024-11-20T17:26:52,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/6a279dbd22c54096ba99ce21422e5f88 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/6a279dbd22c54096ba99ce21422e5f88 2024-11-20T17:26:52,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/29868b73f0134f569e6868dc13f7c17a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/29868b73f0134f569e6868dc13f7c17a 2024-11-20T17:26:52,614 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/3448538a381540faa3f09f4d884fb5a2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/3448538a381540faa3f09f4d884fb5a2 2024-11-20T17:26:52,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/73ad2385950c492c948fa46d263b381e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/73ad2385950c492c948fa46d263b381e 2024-11-20T17:26:52,616 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f083441f383b4df983b5265b77bcea82 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f083441f383b4df983b5265b77bcea82 2024-11-20T17:26:52,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/64974ff8aefb499cbd48f78578fd16b3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/64974ff8aefb499cbd48f78578fd16b3 2024-11-20T17:26:52,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/67552e37d35d42df8103b5e06fa40f8e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/67552e37d35d42df8103b5e06fa40f8e 2024-11-20T17:26:52,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/245fd2601e8f421a8b93ee3df3b2242a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/245fd2601e8f421a8b93ee3df3b2242a 2024-11-20T17:26:52,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/9c72932933e44cd7835e74d686dcfbda to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/9c72932933e44cd7835e74d686dcfbda 2024-11-20T17:26:52,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f6f8cd4a41594f62a152ac24b4634f35 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f6f8cd4a41594f62a152ac24b4634f35 2024-11-20T17:26:52,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/eaa94664ea734a969eb5d8581548a5fe to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/eaa94664ea734a969eb5d8581548a5fe 2024-11-20T17:26:52,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/492359fe81d1443ab561816ddab5fcfe to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/492359fe81d1443ab561816ddab5fcfe 2024-11-20T17:26:52,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/da90095b25f64be0b6e7c13814f99c55 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/da90095b25f64be0b6e7c13814f99c55 2024-11-20T17:26:52,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/fe8adef036244d1e839798324bf8ec45 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/fe8adef036244d1e839798324bf8ec45 2024-11-20T17:26:52,624 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0809926c8a9b433bb81b8198b1ee265f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0809926c8a9b433bb81b8198b1ee265f 2024-11-20T17:26:52,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/e9016b9628274798bd6825c6feefbdd0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/e9016b9628274798bd6825c6feefbdd0 2024-11-20T17:26:52,626 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/447fd3115def45eaa1ce6f780b0b9452, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/fab796eee72e4adb8b0c7f42a4a84ff5, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/075967393db04289ad365bbb74547112, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/2e665a485f4a4ad8bd24e4b84674af29, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/d84c9154f4de4048ab8fe1f7a06ded2d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/43c1a2dd4ad844439d5fb904b5101ee9, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/539f8e96a39e4473906fcc37aa0c9946, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/bd5ff5b70ec44098a19609fa52ae713b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b71f3e09019d4ef8893b5c4029da1fae, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/df0b77be78c24a42bc9765b0e616a6c4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/792e56ea7de54fa4b9cf9e961087a1ab, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b3cacc9139fc4ae0adab85bd8f3e834d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/a86c73a2162d40b28eefe96bc8777ebd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/691cc1b29d9b4635b56012a7e44fa1d8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/7a46af784b8247489bdeb8ea0ebbd5f3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b9260e2fb066434496b7e68fd25a0e79, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/9303ab5b7da44a35984e0de9a1a98db4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/7bd3811241844de5893f9ce42b0a5441, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/285552750fef44ca84a0073f5ec8b778, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/2d4465fcc52744339357eedf5653406c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/fd01b24bc5c749dda33b185c9124fa4d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/4f188159d10845798cf01172823f4ceb, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/f316b727511343839f7b135ed102755e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/254efa28fe3347c5b2db84b25acb8ab9, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/35f153d2b30e4f0aa4914bae98263ded] to archive 2024-11-20T17:26:52,627 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T17:26:52,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/447fd3115def45eaa1ce6f780b0b9452 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/447fd3115def45eaa1ce6f780b0b9452 2024-11-20T17:26:52,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/fab796eee72e4adb8b0c7f42a4a84ff5 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/fab796eee72e4adb8b0c7f42a4a84ff5 2024-11-20T17:26:52,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/075967393db04289ad365bbb74547112 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/075967393db04289ad365bbb74547112 2024-11-20T17:26:52,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/2e665a485f4a4ad8bd24e4b84674af29 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/2e665a485f4a4ad8bd24e4b84674af29 2024-11-20T17:26:52,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/d84c9154f4de4048ab8fe1f7a06ded2d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/d84c9154f4de4048ab8fe1f7a06ded2d 2024-11-20T17:26:52,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/43c1a2dd4ad844439d5fb904b5101ee9 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/43c1a2dd4ad844439d5fb904b5101ee9 2024-11-20T17:26:52,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/539f8e96a39e4473906fcc37aa0c9946 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/539f8e96a39e4473906fcc37aa0c9946 2024-11-20T17:26:52,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/bd5ff5b70ec44098a19609fa52ae713b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/bd5ff5b70ec44098a19609fa52ae713b 2024-11-20T17:26:52,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b71f3e09019d4ef8893b5c4029da1fae to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b71f3e09019d4ef8893b5c4029da1fae 2024-11-20T17:26:52,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/df0b77be78c24a42bc9765b0e616a6c4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/df0b77be78c24a42bc9765b0e616a6c4 2024-11-20T17:26:52,635 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/792e56ea7de54fa4b9cf9e961087a1ab to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/792e56ea7de54fa4b9cf9e961087a1ab 2024-11-20T17:26:52,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b3cacc9139fc4ae0adab85bd8f3e834d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b3cacc9139fc4ae0adab85bd8f3e834d 2024-11-20T17:26:52,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/a86c73a2162d40b28eefe96bc8777ebd to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/a86c73a2162d40b28eefe96bc8777ebd 2024-11-20T17:26:52,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/691cc1b29d9b4635b56012a7e44fa1d8 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/691cc1b29d9b4635b56012a7e44fa1d8 2024-11-20T17:26:52,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/7a46af784b8247489bdeb8ea0ebbd5f3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/7a46af784b8247489bdeb8ea0ebbd5f3 2024-11-20T17:26:52,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b9260e2fb066434496b7e68fd25a0e79 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/b9260e2fb066434496b7e68fd25a0e79 2024-11-20T17:26:52,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/9303ab5b7da44a35984e0de9a1a98db4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/9303ab5b7da44a35984e0de9a1a98db4 2024-11-20T17:26:52,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/7bd3811241844de5893f9ce42b0a5441 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/7bd3811241844de5893f9ce42b0a5441 2024-11-20T17:26:52,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/285552750fef44ca84a0073f5ec8b778 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/285552750fef44ca84a0073f5ec8b778 2024-11-20T17:26:52,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/2d4465fcc52744339357eedf5653406c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/2d4465fcc52744339357eedf5653406c 2024-11-20T17:26:52,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/fd01b24bc5c749dda33b185c9124fa4d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/fd01b24bc5c749dda33b185c9124fa4d 2024-11-20T17:26:52,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/4f188159d10845798cf01172823f4ceb to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/4f188159d10845798cf01172823f4ceb 2024-11-20T17:26:52,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/f316b727511343839f7b135ed102755e to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/f316b727511343839f7b135ed102755e 2024-11-20T17:26:52,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/254efa28fe3347c5b2db84b25acb8ab9 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/254efa28fe3347c5b2db84b25acb8ab9 2024-11-20T17:26:52,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/35f153d2b30e4f0aa4914bae98263ded to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/35f153d2b30e4f0aa4914bae98263ded 2024-11-20T17:26:52,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/26aae5d815d3424296cc79d3a39cff2b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/14de6a676eeb41ffbcaa9d916a9a4fd6, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/47724df206cf426aa5ffd7aa4310478b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6eaa1a8c8e2a4fb290c90f1ce4b0d372, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1e7e27358e65464dabbaefd5788df26e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/abc4c70b0d85427fb3dd3251f1f1995b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/af83b453f6334c28a7280e20155c90f2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/a1d2b0f752df4950a219b2df75c801d0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1d9d2b36e252483497378b667a6e188b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/e7e0f63d5bb34e19a4935d90604bec8f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/b9d9a6c7362a4deaa940f0cbcb375097, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6185a8f6ead54668ae9d5c1aa8742059, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/da7edcc919394bdcb9cf905ccd0a9f58, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/5447b24835a94d36a1612f15050f8cf9, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/112230080a17451bab6f8093ec3ed0fc, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/87a1a7b0ac8a4a0eb91b8b95fe6d9cec, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/733bab60f2fc4b80b4f51c9677a72f88, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/8dfcdb00dd2c45ac99bb5db78ddbc62e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/fd62b1e4a8214d5c8a8be23b000a47e3, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1d299202023843269307f855d55c1b98, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/c52bcf6d9b08494497f2a9c80d9d56b7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/a0d46284b70447528b353fc90016dda1, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/4f4ce2b62d3e408ab775ffb7fb0923ed, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/bd3eeac1ed8d4231bdcc4ce5f0aaedfc, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6048b2e3873b47d68a829e1276242dd4] to archive 2024-11-20T17:26:52,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
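The moves logged above follow a simple convention: each compacted store file keeps its relative path but is re-rooted under archive/ instead of data/. A minimal sketch of that mapping, for illustration only (this is not HBase's HFileArchiver code; rootDir and storeFile are assumed inputs taken from the paths shown in the log):

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
        // rootDir:   hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff
        // storeFile: <rootDir>/data/default/TestAcidGuarantees/<region>/B/447fd3115def45eaa1ce6f780b0b9452
        // result:    <rootDir>/archive/data/default/TestAcidGuarantees/<region>/B/447fd3115def45eaa1ce6f780b0b9452
        static Path archiveLocation(Path rootDir, Path storeFile) {
            // Strip the root prefix, then re-root the relative path under <rootDir>/archive
            String relative = storeFile.toString().substring(rootDir.toString().length() + 1);
            return new Path(new Path(rootDir, "archive"), relative);
        }
    }
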
2024-11-20T17:26:52,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/26aae5d815d3424296cc79d3a39cff2b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/26aae5d815d3424296cc79d3a39cff2b 2024-11-20T17:26:52,650 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/14de6a676eeb41ffbcaa9d916a9a4fd6 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/14de6a676eeb41ffbcaa9d916a9a4fd6 2024-11-20T17:26:52,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/47724df206cf426aa5ffd7aa4310478b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/47724df206cf426aa5ffd7aa4310478b 2024-11-20T17:26:52,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6eaa1a8c8e2a4fb290c90f1ce4b0d372 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6eaa1a8c8e2a4fb290c90f1ce4b0d372 2024-11-20T17:26:52,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1e7e27358e65464dabbaefd5788df26e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1e7e27358e65464dabbaefd5788df26e 2024-11-20T17:26:52,653 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/abc4c70b0d85427fb3dd3251f1f1995b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/abc4c70b0d85427fb3dd3251f1f1995b 2024-11-20T17:26:52,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/af83b453f6334c28a7280e20155c90f2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/af83b453f6334c28a7280e20155c90f2 2024-11-20T17:26:52,654 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/a1d2b0f752df4950a219b2df75c801d0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/a1d2b0f752df4950a219b2df75c801d0 2024-11-20T17:26:52,655 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1d9d2b36e252483497378b667a6e188b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1d9d2b36e252483497378b667a6e188b 2024-11-20T17:26:52,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/e7e0f63d5bb34e19a4935d90604bec8f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/e7e0f63d5bb34e19a4935d90604bec8f 2024-11-20T17:26:52,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/b9d9a6c7362a4deaa940f0cbcb375097 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/b9d9a6c7362a4deaa940f0cbcb375097 2024-11-20T17:26:52,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6185a8f6ead54668ae9d5c1aa8742059 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6185a8f6ead54668ae9d5c1aa8742059 2024-11-20T17:26:52,658 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/da7edcc919394bdcb9cf905ccd0a9f58 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/da7edcc919394bdcb9cf905ccd0a9f58 2024-11-20T17:26:52,659 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/5447b24835a94d36a1612f15050f8cf9 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/5447b24835a94d36a1612f15050f8cf9 2024-11-20T17:26:52,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/112230080a17451bab6f8093ec3ed0fc to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/112230080a17451bab6f8093ec3ed0fc 2024-11-20T17:26:52,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/87a1a7b0ac8a4a0eb91b8b95fe6d9cec to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/87a1a7b0ac8a4a0eb91b8b95fe6d9cec 2024-11-20T17:26:52,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/733bab60f2fc4b80b4f51c9677a72f88 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/733bab60f2fc4b80b4f51c9677a72f88 2024-11-20T17:26:52,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/8dfcdb00dd2c45ac99bb5db78ddbc62e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/8dfcdb00dd2c45ac99bb5db78ddbc62e 2024-11-20T17:26:52,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/fd62b1e4a8214d5c8a8be23b000a47e3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/fd62b1e4a8214d5c8a8be23b000a47e3 2024-11-20T17:26:52,664 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1d299202023843269307f855d55c1b98 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/1d299202023843269307f855d55c1b98 2024-11-20T17:26:52,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/c52bcf6d9b08494497f2a9c80d9d56b7 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/c52bcf6d9b08494497f2a9c80d9d56b7 2024-11-20T17:26:52,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/a0d46284b70447528b353fc90016dda1 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/a0d46284b70447528b353fc90016dda1 2024-11-20T17:26:52,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/4f4ce2b62d3e408ab775ffb7fb0923ed to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/4f4ce2b62d3e408ab775ffb7fb0923ed 2024-11-20T17:26:52,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/bd3eeac1ed8d4231bdcc4ce5f0aaedfc to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/bd3eeac1ed8d4231bdcc4ce5f0aaedfc 2024-11-20T17:26:52,668 DEBUG [StoreCloser-TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6048b2e3873b47d68a829e1276242dd4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6048b2e3873b47d68a829e1276242dd4 2024-11-20T17:26:52,671 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/recovered.edits/380.seqid, newMaxSeqId=380, maxSeqId=1 2024-11-20T17:26:52,672 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b. 2024-11-20T17:26:52,672 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1635): Region close journal for b3043adcc65f1dded05c4bfd9fcde44b: 2024-11-20T17:26:52,673 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] handler.UnassignRegionHandler(170): Closed b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:52,673 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=b3043adcc65f1dded05c4bfd9fcde44b, regionState=CLOSED 2024-11-20T17:26:52,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-20T17:26:52,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; CloseRegionProcedure b3043adcc65f1dded05c4bfd9fcde44b, server=d514dc944523,44015,1732123455293 in 1.4620 sec 2024-11-20T17:26:52,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=138, resume processing ppid=137 2024-11-20T17:26:52,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=137, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b3043adcc65f1dded05c4bfd9fcde44b, UNASSIGN in 1.4650 sec 2024-11-20T17:26:52,677 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-20T17:26:52,677 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4670 sec 2024-11-20T17:26:52,678 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123612678"}]},"ts":"1732123612678"} 2024-11-20T17:26:52,678 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T17:26:52,680 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T17:26:52,681 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4780 sec 2024-11-20T17:26:53,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T17:26:53,309 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-20T17:26:53,310 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T17:26:53,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:53,311 DEBUG [PEWorker-1 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=140, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:53,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T17:26:53,311 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=140, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:53,313 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:53,314 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/recovered.edits] 2024-11-20T17:26:53,317 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0fa727273ffb45c1a13c1e80a18086f2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/0fa727273ffb45c1a13c1e80a18086f2 2024-11-20T17:26:53,317 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/1a0f0ddfed584b5fbb3edf3856c284d3 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/1a0f0ddfed584b5fbb3edf3856c284d3 2024-11-20T17:26:53,318 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f7cdc06c31f5432b83c13dcb5880dae4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/A/f7cdc06c31f5432b83c13dcb5880dae4 2024-11-20T17:26:53,320 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/465439fb668c4d058a6abc8a02f1e3c5 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/465439fb668c4d058a6abc8a02f1e3c5 2024-11-20T17:26:53,321 DEBUG [HFileArchiver-5 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/c5504f4323fe4b369f71284f88896c7e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/c5504f4323fe4b369f71284f88896c7e 2024-11-20T17:26:53,322 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/dd26b8a430d041b1941056fea8fc1c8a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/B/dd26b8a430d041b1941056fea8fc1c8a 2024-11-20T17:26:53,323 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/5bf2320c915a4fdcabc9ad64c7633651 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/5bf2320c915a4fdcabc9ad64c7633651 2024-11-20T17:26:53,324 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6635f94e4c734f77b4667735873b91b0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/6635f94e4c734f77b4667735873b91b0 2024-11-20T17:26:53,325 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/f1dbdb6e77b04f5fb44cfe2e22ef328a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/C/f1dbdb6e77b04f5fb44cfe2e22ef328a 2024-11-20T17:26:53,326 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/recovered.edits/380.seqid to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b/recovered.edits/380.seqid 2024-11-20T17:26:53,327 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/b3043adcc65f1dded05c4bfd9fcde44b 2024-11-20T17:26:53,327 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T17:26:53,329 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=140, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:53,330 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from 
hbase:meta 2024-11-20T17:26:53,331 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T17:26:53,332 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=140, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:53,332 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T17:26:53,332 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732123613332"}]},"ts":"9223372036854775807"} 2024-11-20T17:26:53,334 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T17:26:53,334 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b3043adcc65f1dded05c4bfd9fcde44b, NAME => 'TestAcidGuarantees,,1732123586091.b3043adcc65f1dded05c4bfd9fcde44b.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T17:26:53,334 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T17:26:53,334 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732123613334"}]},"ts":"9223372036854775807"} 2024-11-20T17:26:53,335 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T17:26:53,337 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=140, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:53,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 27 msec 2024-11-20T17:26:53,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T17:26:53,412 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-20T17:26:53,421 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=239 (was 242), OpenFileDescriptor=450 (was 461), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=308 (was 289) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6112 (was 6128) 2024-11-20T17:26:53,430 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=239, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=308, ProcessCount=11, AvailableMemoryMB=6112 2024-11-20T17:26:53,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
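The WARN above is the master's TableDescriptorChecker flagging that the effective memstore flush size resolves to 131072 bytes (128 KB), which it considers unusually small but still accepts; a test like this presumably uses a tiny flush size to force frequent flushes. A hedged sketch of how such a per-table flush size can be set with the client API (the table name and the 131072 value come from the log; the rest is illustrative, not the test's own code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Build a descriptor whose MEMSTORE_FLUSHSIZE matches the 131072 bytes in the warning
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(128 * 1024L)
        .build();
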
2024-11-20T17:26:53,431 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:26:53,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:53,432 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T17:26:53,432 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:53,432 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 141 2024-11-20T17:26:53,433 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T17:26:53,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-11-20T17:26:53,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742337_1513 (size=963) 2024-11-20T17:26:53,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-11-20T17:26:53,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-11-20T17:26:53,841 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff 2024-11-20T17:26:53,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742338_1514 (size=53) 2024-11-20T17:26:54,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-11-20T17:26:54,246 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:26:54,246 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 461e1341891f7389f7e7df762c9abfba, disabling compactions & flushes 2024-11-20T17:26:54,246 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:54,246 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:54,246 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. after waiting 0 ms 2024-11-20T17:26:54,246 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:54,246 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
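For reference, the schema printed by the create request above (ADAPTIVE in-memory compaction, families A/B/C with a single version and 64 KB blocks) corresponds roughly to a descriptor built like the following. This is a hedged sketch, not the test's own code; admin is assumed to be an org.apache.hadoop.hbase.client.Admin obtained from a Connection:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // ADAPTIVE compacting memstore, as in the TABLE_ATTRIBUTES metadata above
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    // Families A, B, C: VERSIONS => '1', BLOCKSIZE => '65536'
    for (String cf : new String[] {"A", "B", "C"}) {
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(cf))
            .setMaxVersions(1)
            .setBlocksize(64 * 1024)
            .build());
    }
    admin.createTable(builder.build());
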
2024-11-20T17:26:54,246 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:26:54,247 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T17:26:54,247 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732123614247"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732123614247"}]},"ts":"1732123614247"} 2024-11-20T17:26:54,248 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T17:26:54,248 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T17:26:54,248 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123614248"}]},"ts":"1732123614248"} 2024-11-20T17:26:54,249 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T17:26:54,253 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=461e1341891f7389f7e7df762c9abfba, ASSIGN}] 2024-11-20T17:26:54,253 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=461e1341891f7389f7e7df762c9abfba, ASSIGN 2024-11-20T17:26:54,254 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=461e1341891f7389f7e7df762c9abfba, ASSIGN; state=OFFLINE, location=d514dc944523,44015,1732123455293; forceNewPlan=false, retain=false 2024-11-20T17:26:54,404 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=461e1341891f7389f7e7df762c9abfba, regionState=OPENING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:26:54,405 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; OpenRegionProcedure 461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:26:54,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-11-20T17:26:54,556 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:54,559 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
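
[Editor's note] The repeated "Checking to see if procedure is done pid=141" calls above are the client polling the master for the CreateTableProcedure; with the Java client that polling sits behind the future returned by createTableAsync. A hedged sketch of the client side (the helper name createAndWait is illustrative):

import java.util.concurrent.Future;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

static void createAndWait(Admin admin, TableDescriptor td) throws Exception {
  // Stores the CreateTableProcedure on the master (pid=141 in this log)
  Future<Void> created = admin.createTableAsync(td);
  // Blocks, polling the master until the procedure reports completion
  created.get();
  assert admin.isTableAvailable(td.getTableName());
}
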
2024-11-20T17:26:54,559 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(7285): Opening region: {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:26:54,559 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:54,559 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:26:54,559 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(7327): checking encryption for 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:54,559 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(7330): checking classloading for 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:54,560 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:54,562 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:26:54,562 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 461e1341891f7389f7e7df762c9abfba columnFamilyName A 2024-11-20T17:26:54,562 DEBUG [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:54,562 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.HStore(327): Store=461e1341891f7389f7e7df762c9abfba/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:26:54,562 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:54,563 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:26:54,563 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 461e1341891f7389f7e7df762c9abfba columnFamilyName B 2024-11-20T17:26:54,563 DEBUG [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:54,564 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.HStore(327): Store=461e1341891f7389f7e7df762c9abfba/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:26:54,564 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:54,564 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:26:54,565 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 461e1341891f7389f7e7df762c9abfba columnFamilyName C 2024-11-20T17:26:54,565 DEBUG [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:54,565 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.HStore(327): Store=461e1341891f7389f7e7df762c9abfba/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:26:54,565 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:54,566 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:54,566 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:54,567 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:26:54,568 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1085): writing seq id for 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:54,569 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T17:26:54,569 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1102): Opened 461e1341891f7389f7e7df762c9abfba; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69901609, jitterRate=0.04161514341831207}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:26:54,570 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1001): Region open journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:26:54,570 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., pid=143, masterSystemTime=1732123614556 2024-11-20T17:26:54,572 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:54,572 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
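
[Editor's note] Each store above is opened with a CompactingMemStore whose compactor is ADAPTIVE, and the CompactionConfiguration line prints the effective store-compaction thresholds. Assuming the stock HBase 2.x APIs, the same behaviour can be requested per column family, and the logged thresholds map to the configuration keys sketched below (helper names are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Per-family form of the ADAPTIVE in-memory compaction the stores report above.
static ColumnFamilyDescriptor adaptiveFamily(String name) {
  return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
      .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
      .build();
}

// Server-side keys behind the CompactionConfiguration line; the values here
// match what the log prints (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2).
static void tuneCompaction(Configuration conf) {
  conf.setInt("hbase.hstore.compaction.min", 3);
  conf.setInt("hbase.hstore.compaction.max", 10);
  conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
}
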
2024-11-20T17:26:54,572 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=461e1341891f7389f7e7df762c9abfba, regionState=OPEN, openSeqNum=2, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:26:54,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-20T17:26:54,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; OpenRegionProcedure 461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 in 168 msec 2024-11-20T17:26:54,575 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-11-20T17:26:54,575 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=461e1341891f7389f7e7df762c9abfba, ASSIGN in 321 msec 2024-11-20T17:26:54,575 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T17:26:54,575 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123614575"}]},"ts":"1732123614575"} 2024-11-20T17:26:54,576 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T17:26:54,579 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T17:26:54,580 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1480 sec 2024-11-20T17:26:55,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-11-20T17:26:55,537 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 141 completed 2024-11-20T17:26:55,538 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2089b1f4 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@55544bc7 2024-11-20T17:26:55,544 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3005670a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:55,545 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:55,546 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44652, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:55,546 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T17:26:55,547 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56906, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T17:26:55,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T17:26:55,548 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T17:26:55,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T17:26:55,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742339_1515 (size=999) 2024-11-20T17:26:55,958 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-20T17:26:55,958 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-20T17:26:55,960 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:26:55,961 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=461e1341891f7389f7e7df762c9abfba, REOPEN/MOVE}] 2024-11-20T17:26:55,962 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=461e1341891f7389f7e7df762c9abfba, REOPEN/MOVE 2024-11-20T17:26:55,962 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=461e1341891f7389f7e7df762c9abfba, regionState=CLOSING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:26:55,963 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:26:55,963 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; CloseRegionProcedure 461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:26:56,114 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:56,114 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(124): Close 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,114 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:26:56,114 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1681): Closing 461e1341891f7389f7e7df762c9abfba, disabling compactions & flushes 2024-11-20T17:26:56,115 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:56,115 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:56,115 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. after waiting 0 ms 2024-11-20T17:26:56,115 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
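
[Editor's note] The modify-table request above changes only family 'A', turning it into a MOB family with MOB_THRESHOLD => '4' and triggering the ReopenTableRegionsProcedure that follows. A minimal sketch of the equivalent client call, assuming an open Admin handle (the helper name is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

static void enableMobOnFamilyA(Admin admin) throws IOException {
  TableName tn = TableName.valueOf("TestAcidGuarantees");
  TableDescriptor current = admin.getDescriptor(tn);
  TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
      .modifyColumnFamily(
          ColumnFamilyDescriptorBuilder
              .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
              .build())
      .build();
  // Drives the ModifyTableProcedure and the region reopen recorded below
  admin.modifyTable(modified);
}
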
2024-11-20T17:26:56,118 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T17:26:56,119 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:56,119 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1635): Region close journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:26:56,119 WARN [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionServer(3786): Not adding moved region record: 461e1341891f7389f7e7df762c9abfba to self. 2024-11-20T17:26:56,120 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(170): Closed 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,120 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=461e1341891f7389f7e7df762c9abfba, regionState=CLOSED 2024-11-20T17:26:56,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-20T17:26:56,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseRegionProcedure 461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 in 158 msec 2024-11-20T17:26:56,122 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=461e1341891f7389f7e7df762c9abfba, REOPEN/MOVE; state=CLOSED, location=d514dc944523,44015,1732123455293; forceNewPlan=false, retain=true 2024-11-20T17:26:56,272 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=461e1341891f7389f7e7df762c9abfba, regionState=OPENING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,274 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=146, state=RUNNABLE; OpenRegionProcedure 461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:26:56,425 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:56,427 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:26:56,428 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(7285): Opening region: {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} 2024-11-20T17:26:56,428 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,428 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T17:26:56,428 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(7327): checking encryption for 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,428 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(7330): checking classloading for 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,429 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,430 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:26:56,430 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 461e1341891f7389f7e7df762c9abfba columnFamilyName A 2024-11-20T17:26:56,431 DEBUG [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:56,431 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.HStore(327): Store=461e1341891f7389f7e7df762c9abfba/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:26:56,431 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,432 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:26:56,432 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 461e1341891f7389f7e7df762c9abfba columnFamilyName B 2024-11-20T17:26:56,432 DEBUG [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:56,432 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.HStore(327): Store=461e1341891f7389f7e7df762c9abfba/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:26:56,432 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,433 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T17:26:56,433 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 461e1341891f7389f7e7df762c9abfba columnFamilyName C 2024-11-20T17:26:56,433 DEBUG [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:56,433 INFO [StoreOpener-461e1341891f7389f7e7df762c9abfba-1 {}] regionserver.HStore(327): Store=461e1341891f7389f7e7df762c9abfba/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T17:26:56,433 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:56,434 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,434 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,435 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T17:26:56,436 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1085): writing seq id for 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,437 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1102): Opened 461e1341891f7389f7e7df762c9abfba; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61692249, jitterRate=-0.08071385324001312}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T17:26:56,438 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1001): Region open journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:26:56,438 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., pid=148, masterSystemTime=1732123616425 2024-11-20T17:26:56,439 DEBUG [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:56,439 INFO [RS_OPEN_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
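
[Editor's note] With the region reopened under the MOB-enabled descriptor, the test below asks the master to flush the table and then drives enough writes that puts are rejected with RegionTooBusyException ("Over memstore limit"). A hedged sketch of that pattern from the client side, assuming an open Connection and Admin; note the test's RPC client runs with maxRetries=0, so the exception surfaces directly, whereas a default client would usually wrap it in a retries-exhausted exception:

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

static void flushAndPutWithBackoff(Connection conn, Admin admin) throws Exception {
  TableName tn = TableName.valueOf("TestAcidGuarantees");
  // Same request the master logs below as "flush TestAcidGuarantees" (FlushTableProcedure)
  admin.flush(tn);
  try (Table table = conn.getTable(tn)) {
    Put put = new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    for (int attempt = 0; ; attempt++) {
      try {
        table.put(put);
        break;
      } catch (RegionTooBusyException e) {
        if (attempt >= 5) throw e;      // give up after a few tries
        Thread.sleep(100L << attempt);  // back off while the flush drains the memstore
      }
    }
  }
}
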
2024-11-20T17:26:56,440 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=461e1341891f7389f7e7df762c9abfba, regionState=OPEN, openSeqNum=5, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,441 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=146 2024-11-20T17:26:56,441 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=146, state=SUCCESS; OpenRegionProcedure 461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 in 166 msec 2024-11-20T17:26:56,442 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-11-20T17:26:56,442 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=461e1341891f7389f7e7df762c9abfba, REOPEN/MOVE in 480 msec 2024-11-20T17:26:56,443 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-20T17:26:56,443 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 482 msec 2024-11-20T17:26:56,445 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 895 msec 2024-11-20T17:26:56,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T17:26:56,446 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65f51785 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1208728f 2024-11-20T17:26:56,449 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@92e7af3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:56,450 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3cc71f2e to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d0a9e33 2024-11-20T17:26:56,453 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17899883, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:56,453 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79a7bd2b to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40e55f2a 2024-11-20T17:26:56,456 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b739a35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:56,457 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d688bcb 
to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@271e8143 2024-11-20T17:26:56,460 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20bb05a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:56,460 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31f7e171 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62b06a95 2024-11-20T17:26:56,465 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a5ecd59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:56,466 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d02ace0 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61da8c1c 2024-11-20T17:26:56,469 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b968040, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:56,470 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63054209 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@560a8819 2024-11-20T17:26:56,472 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49019618, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:56,473 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3fbb1399 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3df30e37 2024-11-20T17:26:56,476 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7887fec7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:56,477 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51fccca6 to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@745bf218 2024-11-20T17:26:56,479 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@336d4b92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:56,480 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x539997ae to 127.0.0.1:56028 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78f964f7 2024-11-20T17:26:56,482 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@219191a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T17:26:56,484 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:26:56,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=149, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees 2024-11-20T17:26:56,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T17:26:56,485 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=149, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:26:56,486 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=149, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:26:56,486 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:26:56,491 DEBUG [hconnection-0x331b6524-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:56,491 DEBUG [hconnection-0x20b256e6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:56,491 DEBUG [hconnection-0x7e93ad21-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:56,492 DEBUG [hconnection-0x58af8b85-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:56,492 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44666, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:56,492 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44662, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:56,492 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44664, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:56,492 DEBUG [hconnection-0x22247b45-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:56,493 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44690, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:56,493 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44692, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:56,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:26:56,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:26:56,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:56,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:26:56,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:56,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:26:56,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:56,508 DEBUG [hconnection-0x46683d9a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:56,508 DEBUG [hconnection-0x2b8b5373-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:56,509 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44708, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:56,509 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44720, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:56,510 DEBUG [hconnection-0x41fcf3ad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:56,511 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44730, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:56,511 DEBUG [hconnection-0x4959bf77-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:56,513 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:56,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123676517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123676518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123676519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123676519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123676520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,526 DEBUG [hconnection-0x65bf6f59-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T17:26:56,527 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44744, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T17:26:56,532 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120831286b6d3f2492f8a815549e9ef1d2b_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123616497/Put/seqid=0 2024-11-20T17:26:56,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742340_1516 (size=12154) 2024-11-20T17:26:56,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T17:26:56,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123676621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123676621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123676622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123676622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123676622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,637 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:56,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-11-20T17:26:56,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:56,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:56,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:56,638 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:56,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:56,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:56,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T17:26:56,790 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:56,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-11-20T17:26:56,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:56,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:56,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:56,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:56,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:56,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:56,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123676823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123676824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123676824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:56,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123676825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123676824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:56,943 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:56,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-11-20T17:26:56,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:56,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:56,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:56,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:56,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:56,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:56,946 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:56,949 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120831286b6d3f2492f8a815549e9ef1d2b_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120831286b6d3f2492f8a815549e9ef1d2b_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:56,950 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/ef1a15c39a9f4b6786e096a6020c36bd, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:26:56,951 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/ef1a15c39a9f4b6786e096a6020c36bd is 175, key is test_row_0/A:col10/1732123616497/Put/seqid=0 2024-11-20T17:26:56,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742341_1517 (size=30955) 2024-11-20T17:26:57,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T17:26:57,095 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:57,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-11-20T17:26:57,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:57,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:57,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:57,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
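The records above repeat the same pattern: handlers on port 44015 reject Mutate calls with RegionTooBusyException because region 461e1341891f7389f7e7df762c9abfba's memstore is over its blocking limit (512.0 K in this test; the limit is normally the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier), while the master keeps re-dispatching the flush procedure pid=150, which the region server answers with "NOT flushing ... as already flushing" and an "Unable to complete flush" IOException until the in-progress flush finishes. The sketch below is illustrative only and not part of the log: a minimal client-side put with an explicit back-off on RegionTooBusyException, reusing the table, row and column names that appear in the log. The retry count, sleep values and payload are assumptions, and in a real deployment the HBase client already retries this exception internally.

```java
// Minimal sketch, assuming default client configuration; names taken from the log,
// retry policy and payload are illustrative assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same row/family/qualifier as the mutations being rejected in the log.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);   // server may throw RegionTooBusyException while blocked
          break;            // write accepted once the memstore has drained
        } catch (RegionTooBusyException e) {
          // Region is blocking updates because its memstore exceeds the limit
          // (512.0 K in this test); back off and let the flush catch up.
          Thread.sleep(100L * (attempt + 1));
        }
      }
    }
  }
}
```

The 512.0 K ceiling here reflects the test's deliberately small memstore flush size; in a normal deployment the blocking threshold is larger, and raising hbase.hregion.memstore.block.multiplier or the flush size trades longer write stalls for fewer of them.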
2024-11-20T17:26:57,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:57,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123677125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:57,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:57,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123677126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:57,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:57,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123677126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:57,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:57,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123677127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:57,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:57,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123677127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:57,248 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:57,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-11-20T17:26:57,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:57,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:57,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:57,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,355 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/ef1a15c39a9f4b6786e096a6020c36bd 2024-11-20T17:26:57,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/d2f78ab39ee242498792689a3902facd is 50, key is test_row_0/B:col10/1732123616497/Put/seqid=0 2024-11-20T17:26:57,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742342_1518 (size=12001) 2024-11-20T17:26:57,400 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:57,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-11-20T17:26:57,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:57,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
as already flushing 2024-11-20T17:26:57,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:57,401 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,553 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:57,554 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-11-20T17:26:57,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:57,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:57,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:57,554 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-20T17:26:57,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:57,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:57,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123677629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:57,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123677629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:57,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:57,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123677630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:57,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:57,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123677632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:57,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:57,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123677633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:57,706 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:57,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-11-20T17:26:57,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:57,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:57,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:57,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:57,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/d2f78ab39ee242498792689a3902facd 2024-11-20T17:26:57,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/63c99488ff82433ea28740536796940d is 50, key is test_row_0/C:col10/1732123616497/Put/seqid=0 2024-11-20T17:26:57,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742343_1519 (size=12001) 2024-11-20T17:26:57,813 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/63c99488ff82433ea28740536796940d 2024-11-20T17:26:57,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/ef1a15c39a9f4b6786e096a6020c36bd as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/ef1a15c39a9f4b6786e096a6020c36bd 2024-11-20T17:26:57,821 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/ef1a15c39a9f4b6786e096a6020c36bd, entries=150, sequenceid=16, filesize=30.2 K 2024-11-20T17:26:57,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/d2f78ab39ee242498792689a3902facd as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/d2f78ab39ee242498792689a3902facd 2024-11-20T17:26:57,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/d2f78ab39ee242498792689a3902facd, entries=150, sequenceid=16, filesize=11.7 K 
2024-11-20T17:26:57,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/63c99488ff82433ea28740536796940d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/63c99488ff82433ea28740536796940d 2024-11-20T17:26:57,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/63c99488ff82433ea28740536796940d, entries=150, sequenceid=16, filesize=11.7 K 2024-11-20T17:26:57,831 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 461e1341891f7389f7e7df762c9abfba in 1331ms, sequenceid=16, compaction requested=false 2024-11-20T17:26:57,831 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T17:26:57,831 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:26:57,858 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:57,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-11-20T17:26:57,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:26:57,859 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:26:57,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:26:57,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:57,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:26:57,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:57,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:26:57,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:57,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120197a59ec35284b0887d11715504593e5_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123616517/Put/seqid=0 2024-11-20T17:26:57,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742344_1520 (size=12154) 2024-11-20T17:26:57,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:57,872 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120197a59ec35284b0887d11715504593e5_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120197a59ec35284b0887d11715504593e5_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:57,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/abd3206087d04974bcdc8d4c58e3df19, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:26:57,873 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/abd3206087d04974bcdc8d4c58e3df19 is 175, key is test_row_0/A:col10/1732123616517/Put/seqid=0 2024-11-20T17:26:57,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742345_1521 (size=30955) 2024-11-20T17:26:58,193 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T17:26:58,277 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/abd3206087d04974bcdc8d4c58e3df19 2024-11-20T17:26:58,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/bf83bfaa1cc14d56841b69d1d71a07cf is 50, key is test_row_0/B:col10/1732123616517/Put/seqid=0 2024-11-20T17:26:58,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742346_1522 (size=12001) 2024-11-20T17:26:58,307 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/bf83bfaa1cc14d56841b69d1d71a07cf 2024-11-20T17:26:58,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/5b34f199d6da4112968c7a0315908801 is 50, key is test_row_0/C:col10/1732123616517/Put/seqid=0 2024-11-20T17:26:58,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742347_1523 (size=12001) 2024-11-20T17:26:58,317 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/5b34f199d6da4112968c7a0315908801 2024-11-20T17:26:58,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/abd3206087d04974bcdc8d4c58e3df19 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/abd3206087d04974bcdc8d4c58e3df19 2024-11-20T17:26:58,325 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/abd3206087d04974bcdc8d4c58e3df19, entries=150, sequenceid=40, filesize=30.2 K 2024-11-20T17:26:58,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/bf83bfaa1cc14d56841b69d1d71a07cf as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/bf83bfaa1cc14d56841b69d1d71a07cf 2024-11-20T17:26:58,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,329 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/bf83bfaa1cc14d56841b69d1d71a07cf, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T17:26:58,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/5b34f199d6da4112968c7a0315908801 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/5b34f199d6da4112968c7a0315908801 2024-11-20T17:26:58,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,331 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,333 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,334 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/5b34f199d6da4112968c7a0315908801, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T17:26:58,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,335 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 461e1341891f7389f7e7df762c9abfba in 476ms, sequenceid=40, compaction requested=false 2024-11-20T17:26:58,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:26:58,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:26:58,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=150
2024-11-20T17:26:58,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=150
2024-11-20T17:26:58,338 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149
2024-11-20T17:26:58,338 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8500 sec
2024-11-20T17:26:58,339 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees in 1.8540 sec
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
...
2024-11-20T17:26:58,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149
2024-11-20T17:26:58,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,589 INFO [Thread-2306 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 149 completed
2024-11-20T17:26:58,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,590 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T17:26:58,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees
2024-11-20T17:26:58,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,592 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T17:26:58,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151
2024-11-20T17:26:58,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,593 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T17:26:58,593 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T17:26:58,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:26:58,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122):
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 
461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:58,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T17:26:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:26:58,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:58,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:26:58,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:58,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:26:58,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:26:58,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202ce2916de38d42e69e96ae98bbc8f4ca_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123618656/Put/seqid=0 2024-11-20T17:26:58,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,673 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,677 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,684 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742349_1525 (size=24358) 2024-11-20T17:26:58,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:58,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-20T17:26:58,745 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:58,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-20T17:26:58,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:58,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:58,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:58,746 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
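The entries above show the master dispatching FlushRegionCallable (pid=152) to the region server and the server rejecting it with "Unable to complete flush" because the region is already flushing; the master simply re-dispatches until the in-flight flush finishes. The same request-and-retry pattern can be driven from a client with the Admin API. A minimal sketch, assuming a cluster reachable through the default configuration and the TestAcidGuarantees table named in the log; the retry count and sleep intervals are illustrative and not taken from the test code.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushWithRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      int maxAttempts = 5;                       // illustrative, not the test's value
      for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
          admin.flush(table);                    // ask every region of the table to flush
          break;
        } catch (IOException e) {
          // A region that is already flushing can reject the request, as in the log above.
          if (attempt == maxAttempts) throw e;
          Thread.sleep(200L * attempt);          // brief pause before asking again
        }
      }
    }
  }
}
```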
2024-11-20T17:26:58,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:58,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:58,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:58,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123678698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:58,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123678713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:58,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:58,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:58,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123678746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:58,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:58,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123678746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:58,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:58,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123678746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:58,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:58,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123678847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:58,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:58,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123678847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:58,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:58,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:58,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123678851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:58,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123678851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:58,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:58,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123678851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:58,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-20T17:26:58,897 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:58,898 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-20T17:26:58,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:58,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:58,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:58,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
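The recurring RegionTooBusyException entries ("Over memstore limit=512.0 K") are the region server applying write backpressure: Mutate calls are rejected while the region's memstore sits above its blocking limit, and the writers retry until the flush drains it. A client that manages its own retries could handle the condition roughly as follows. A minimal sketch, assuming the table, row, and family names from the log, and that the exception actually reaches the caller rather than being absorbed by the client's internal retry policy; the backoff values are illustrative.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int maxAttempts = 5;
      long backoffMs = 100;
      for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
          table.put(put);                        // rejected while the region is over its memstore limit
          break;
        } catch (IOException e) {
          boolean busy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt == maxAttempts) {
            throw e;                             // unrelated failure, or out of retries
          }
          Thread.sleep(backoffMs);               // back off while the flush drains the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}
```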
2024-11-20T17:26:58,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:58,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,050 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:59,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123679050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-20T17:26:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
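The 512.0 K figure quoted in these exceptions is the region's blocking memstore size, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the test is evidently running with far smaller values than the 128 MB and 4 defaults. A minimal sketch of that arithmetic, using a hypothetical 128 KB flush size and a multiplier of 4 (the test's actual overrides are not visible in this excerpt):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical settings that would yield the 512 KB blocking limit seen in the log:
    // blocking limit = flush.size * block.multiplier = 128 KB * 4 = 512 KB.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Region blocking memstore size: " + (flushSize * multiplier) + " bytes");
  }
}
```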
2024-11-20T17:26:59,051 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123679051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123679054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123679055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123679055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,086 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:26:59,090 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202ce2916de38d42e69e96ae98bbc8f4ca_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202ce2916de38d42e69e96ae98bbc8f4ca_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:26:59,090 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/18a49cd84ee742beb0b8e84e698bf32a, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:26:59,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/18a49cd84ee742beb0b8e84e698bf32a is 175, key is test_row_0/A:col10/1732123618656/Put/seqid=0 2024-11-20T17:26:59,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742348_1524 (size=73995) 2024-11-20T17:26:59,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-20T17:26:59,203 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:59,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-20T17:26:59,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:26:59,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:59,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:59,204 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:59,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123679352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123679354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,355 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:59,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-20T17:26:59,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:26:59,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:59,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:59,356 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:26:59,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123679357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123679359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123679359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,497 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=51, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/18a49cd84ee742beb0b8e84e698bf32a 2024-11-20T17:26:59,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/51686b493c1e4d719e816fa5bdb5f8fe is 50, key is test_row_0/B:col10/1732123618656/Put/seqid=0 2024-11-20T17:26:59,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742350_1526 (size=12001) 2024-11-20T17:26:59,508 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:59,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-20T17:26:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:26:59,509 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,660 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:59,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-20T17:26:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:59,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-20T17:26:59,812 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:59,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-20T17:26:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:26:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:59,813 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123679854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123679856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123679861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123679863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:26:59,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123679865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:26:59,908 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/51686b493c1e4d719e816fa5bdb5f8fe 2024-11-20T17:26:59,915 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/5aeb3b87f6ad43878eb1768ab2971684 is 50, key is test_row_0/C:col10/1732123618656/Put/seqid=0 2024-11-20T17:26:59,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742351_1527 (size=12001) 2024-11-20T17:26:59,965 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:26:59,965 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-20T17:26:59,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:59,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
as already flushing 2024-11-20T17:26:59,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:26:59,966 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:26:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:00,117 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:00,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-20T17:27:00,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:00,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:00,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:00,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:00,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:00,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:00,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-20T17:27:00,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:00,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:00,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:00,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:00,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:00,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:00,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/5aeb3b87f6ad43878eb1768ab2971684 2024-11-20T17:27:00,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/18a49cd84ee742beb0b8e84e698bf32a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/18a49cd84ee742beb0b8e84e698bf32a 2024-11-20T17:27:00,326 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/18a49cd84ee742beb0b8e84e698bf32a, entries=400, sequenceid=51, filesize=72.3 K 2024-11-20T17:27:00,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/51686b493c1e4d719e816fa5bdb5f8fe as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/51686b493c1e4d719e816fa5bdb5f8fe 2024-11-20T17:27:00,330 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/51686b493c1e4d719e816fa5bdb5f8fe, entries=150, sequenceid=51, 
filesize=11.7 K 2024-11-20T17:27:00,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/5aeb3b87f6ad43878eb1768ab2971684 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/5aeb3b87f6ad43878eb1768ab2971684 2024-11-20T17:27:00,334 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/5aeb3b87f6ad43878eb1768ab2971684, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T17:27:00,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 461e1341891f7389f7e7df762c9abfba in 1671ms, sequenceid=51, compaction requested=true 2024-11-20T17:27:00,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:00,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:27:00,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:00,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:27:00,335 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:27:00,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:00,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:27:00,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:27:00,335 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:27:00,335 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:27:00,335 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 135905 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:27:00,335 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 
461e1341891f7389f7e7df762c9abfba/A is initiating minor compaction (all files) 2024-11-20T17:27:00,335 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/B is initiating minor compaction (all files) 2024-11-20T17:27:00,335 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/B in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:00,335 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/A in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:00,336 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/d2f78ab39ee242498792689a3902facd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/bf83bfaa1cc14d56841b69d1d71a07cf, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/51686b493c1e4d719e816fa5bdb5f8fe] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=35.2 K 2024-11-20T17:27:00,336 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/ef1a15c39a9f4b6786e096a6020c36bd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/abd3206087d04974bcdc8d4c58e3df19, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/18a49cd84ee742beb0b8e84e698bf32a] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=132.7 K 2024-11-20T17:27:00,336 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:00,336 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/ef1a15c39a9f4b6786e096a6020c36bd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/abd3206087d04974bcdc8d4c58e3df19, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/18a49cd84ee742beb0b8e84e698bf32a] 2024-11-20T17:27:00,336 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting d2f78ab39ee242498792689a3902facd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732123616497 2024-11-20T17:27:00,336 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef1a15c39a9f4b6786e096a6020c36bd, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732123616497 2024-11-20T17:27:00,336 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting bf83bfaa1cc14d56841b69d1d71a07cf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732123616516 2024-11-20T17:27:00,336 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting abd3206087d04974bcdc8d4c58e3df19, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732123616516 2024-11-20T17:27:00,336 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 51686b493c1e4d719e816fa5bdb5f8fe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123618656 2024-11-20T17:27:00,337 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18a49cd84ee742beb0b8e84e698bf32a, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123618651 2024-11-20T17:27:00,355 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:00,356 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#B#compaction#447 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:00,356 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/3b3dab61c09846499a2cb865140c5bb7 is 50, key is test_row_0/B:col10/1732123618656/Put/seqid=0 2024-11-20T17:27:00,358 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411206f8f2c39bcbb4b0589560300a824198d_461e1341891f7389f7e7df762c9abfba store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:00,361 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411206f8f2c39bcbb4b0589560300a824198d_461e1341891f7389f7e7df762c9abfba, store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:00,361 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206f8f2c39bcbb4b0589560300a824198d_461e1341891f7389f7e7df762c9abfba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:00,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742352_1528 (size=12104) 2024-11-20T17:27:00,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742353_1529 (size=4469) 2024-11-20T17:27:00,423 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:00,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-11-20T17:27:00,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:00,423 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T17:27:00,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:00,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:00,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:00,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:00,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:00,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:00,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b3132b97ee2e4893945b103910e681aa_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123618697/Put/seqid=0 2024-11-20T17:27:00,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742354_1530 (size=12154) 2024-11-20T17:27:00,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:00,470 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b3132b97ee2e4893945b103910e681aa_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b3132b97ee2e4893945b103910e681aa_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:00,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/c88e13696eb846f7a12faf3adcb60d88, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:00,472 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/c88e13696eb846f7a12faf3adcb60d88 is 175, key is test_row_0/A:col10/1732123618697/Put/seqid=0 2024-11-20T17:27:00,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742355_1531 (size=30955) 2024-11-20T17:27:00,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-20T17:27:00,768 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/3b3dab61c09846499a2cb865140c5bb7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/3b3dab61c09846499a2cb865140c5bb7 2024-11-20T17:27:00,772 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#A#compaction#448 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:00,772 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/B of 461e1341891f7389f7e7df762c9abfba into 3b3dab61c09846499a2cb865140c5bb7(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:27:00,772 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:00,772 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/B, priority=13, startTime=1732123620335; duration=0sec 2024-11-20T17:27:00,772 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:27:00,772 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:B 2024-11-20T17:27:00,772 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:27:00,773 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/9b9760076dbe4e53a9649d0f5e0effb7 is 175, key is test_row_0/A:col10/1732123618656/Put/seqid=0 2024-11-20T17:27:00,773 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:27:00,773 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/C is initiating minor compaction (all files) 2024-11-20T17:27:00,773 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/C in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:00,774 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/63c99488ff82433ea28740536796940d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/5b34f199d6da4112968c7a0315908801, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/5aeb3b87f6ad43878eb1768ab2971684] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=35.2 K 2024-11-20T17:27:00,774 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 63c99488ff82433ea28740536796940d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732123616497 2024-11-20T17:27:00,775 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b34f199d6da4112968c7a0315908801, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732123616516 2024-11-20T17:27:00,775 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 5aeb3b87f6ad43878eb1768ab2971684, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123618656 2024-11-20T17:27:00,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742356_1532 (size=31058) 2024-11-20T17:27:00,782 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#C#compaction#450 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:00,782 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/9b9760076dbe4e53a9649d0f5e0effb7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/9b9760076dbe4e53a9649d0f5e0effb7 2024-11-20T17:27:00,782 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/70a11f6869be4e6c9f1ce9152d0e721e is 50, key is test_row_0/C:col10/1732123618656/Put/seqid=0 2024-11-20T17:27:00,786 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/A of 461e1341891f7389f7e7df762c9abfba into 9b9760076dbe4e53a9649d0f5e0effb7(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:27:00,786 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:00,786 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/A, priority=13, startTime=1732123620334; duration=0sec 2024-11-20T17:27:00,786 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:00,787 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:A 2024-11-20T17:27:00,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742357_1533 (size=12104) 2024-11-20T17:27:00,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:00,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:00,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:00,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123680870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:00,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:00,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123680872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:00,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:00,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123680873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:00,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:00,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:00,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123680874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:00,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123680873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:00,876 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=76, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/c88e13696eb846f7a12faf3adcb60d88 2024-11-20T17:27:00,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/9fde4b93322943e6bfe0f022d11c3320 is 50, key is test_row_0/B:col10/1732123618697/Put/seqid=0 2024-11-20T17:27:00,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742358_1534 (size=12001) 2024-11-20T17:27:00,889 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/9fde4b93322943e6bfe0f022d11c3320 2024-11-20T17:27:00,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/fb28edc7e00d43b1a3e2991d2060e9b2 is 50, key is test_row_0/C:col10/1732123618697/Put/seqid=0 2024-11-20T17:27:00,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742359_1535 (size=12001) 2024-11-20T17:27:00,901 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/fb28edc7e00d43b1a3e2991d2060e9b2 2024-11-20T17:27:00,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/c88e13696eb846f7a12faf3adcb60d88 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c88e13696eb846f7a12faf3adcb60d88 2024-11-20T17:27:00,909 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c88e13696eb846f7a12faf3adcb60d88, entries=150, sequenceid=76, filesize=30.2 K 2024-11-20T17:27:00,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/9fde4b93322943e6bfe0f022d11c3320 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/9fde4b93322943e6bfe0f022d11c3320 2024-11-20T17:27:00,913 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/9fde4b93322943e6bfe0f022d11c3320, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T17:27:00,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/fb28edc7e00d43b1a3e2991d2060e9b2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/fb28edc7e00d43b1a3e2991d2060e9b2 2024-11-20T17:27:00,918 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/fb28edc7e00d43b1a3e2991d2060e9b2, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T17:27:00,919 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 461e1341891f7389f7e7df762c9abfba in 496ms, sequenceid=76, compaction requested=false 2024-11-20T17:27:00,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 
{event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:00,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:00,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=152 2024-11-20T17:27:00,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=152 2024-11-20T17:27:00,924 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-11-20T17:27:00,924 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3280 sec 2024-11-20T17:27:00,925 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees in 2.3340 sec 2024-11-20T17:27:00,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:00,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:27:00,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:00,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:00,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:00,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:00,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:00,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:00,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112090403f5ce3844f96b4a03523a5de235c_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123620872/Put/seqid=0 2024-11-20T17:27:00,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742360_1536 (size=14594) 2024-11-20T17:27:00,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:00,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123680995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123680997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123680998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123680999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123681100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123681102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123681102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123681103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,197 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/70a11f6869be4e6c9f1ce9152d0e721e as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/70a11f6869be4e6c9f1ce9152d0e721e 2024-11-20T17:27:01,202 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/C of 461e1341891f7389f7e7df762c9abfba into 70a11f6869be4e6c9f1ce9152d0e721e(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
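The repeated RegionTooBusyException entries above record ClientService Mutate calls being rejected by HRegion.checkResources while the region's memstore is over its 512.0 K blocking limit and the in-flight flush is still draining. The HBase client normally absorbs these rejections with its own internal retries; the short standalone Java sketch below only makes that retry-with-backoff behaviour explicit for illustration. Only the table name is taken from the log; the column family, row key, value, backoff values and retry cap are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Illustrative write; family/qualifier/value are assumptions, not taken from the test.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put); // server side may reject this while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;               // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs); // wait for the flush to drain the memstore before retrying
          backoffMs *= 2;
        }
      }
    }
  }
}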
2024-11-20T17:27:01,202 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:01,202 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/C, priority=13, startTime=1732123620335; duration=0sec 2024-11-20T17:27:01,202 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:01,202 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:C 2024-11-20T17:27:01,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123681303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123681305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123681306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123681306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,387 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:01,391 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112090403f5ce3844f96b4a03523a5de235c_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112090403f5ce3844f96b4a03523a5de235c_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:01,392 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/a36098c8697c44c085182d37cf172559, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:01,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/a36098c8697c44c085182d37cf172559 is 175, key is test_row_0/A:col10/1732123620872/Put/seqid=0 2024-11-20T17:27:01,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742361_1537 (size=39549) 2024-11-20T17:27:01,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123681606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123681609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123681609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:01,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123681610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:01,796 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=91, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/a36098c8697c44c085182d37cf172559 2024-11-20T17:27:01,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/648108316ecd4c859b81ca8d187d2100 is 50, key is test_row_0/B:col10/1732123620872/Put/seqid=0 2024-11-20T17:27:01,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742362_1538 (size=12001) 2024-11-20T17:27:01,817 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/648108316ecd4c859b81ca8d187d2100 2024-11-20T17:27:01,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/c672cc6d69a7433f8f1029d64ce3ffb2 is 50, key is test_row_0/C:col10/1732123620872/Put/seqid=0 2024-11-20T17:27:01,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742363_1539 (size=12001) 2024-11-20T17:27:01,831 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/c672cc6d69a7433f8f1029d64ce3ffb2 2024-11-20T17:27:01,835 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/a36098c8697c44c085182d37cf172559 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/a36098c8697c44c085182d37cf172559 2024-11-20T17:27:01,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/a36098c8697c44c085182d37cf172559, entries=200, sequenceid=91, filesize=38.6 K 2024-11-20T17:27:01,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/648108316ecd4c859b81ca8d187d2100 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/648108316ecd4c859b81ca8d187d2100 2024-11-20T17:27:01,842 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/648108316ecd4c859b81ca8d187d2100, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T17:27:01,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/c672cc6d69a7433f8f1029d64ce3ffb2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c672cc6d69a7433f8f1029d64ce3ffb2 2024-11-20T17:27:01,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c672cc6d69a7433f8f1029d64ce3ffb2, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T17:27:01,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 461e1341891f7389f7e7df762c9abfba in 870ms, sequenceid=91, compaction requested=true 2024-11-20T17:27:01,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:01,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:27:01,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:01,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:27:01,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:01,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:27:01,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:27:01,847 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:27:01,847 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:27:01,848 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:27:01,848 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:27:01,848 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/A is initiating minor compaction (all files) 2024-11-20T17:27:01,848 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/B is initiating minor compaction (all files) 2024-11-20T17:27:01,848 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/A in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:01,848 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/B in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:01,848 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/9b9760076dbe4e53a9649d0f5e0effb7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c88e13696eb846f7a12faf3adcb60d88, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/a36098c8697c44c085182d37cf172559] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=99.2 K 2024-11-20T17:27:01,848 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/3b3dab61c09846499a2cb865140c5bb7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/9fde4b93322943e6bfe0f022d11c3320, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/648108316ecd4c859b81ca8d187d2100] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=35.3 K 2024-11-20T17:27:01,848 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:01,848 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/9b9760076dbe4e53a9649d0f5e0effb7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c88e13696eb846f7a12faf3adcb60d88, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/a36098c8697c44c085182d37cf172559] 2024-11-20T17:27:01,848 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b9760076dbe4e53a9649d0f5e0effb7, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123618656 2024-11-20T17:27:01,848 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b3dab61c09846499a2cb865140c5bb7, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123618656 2024-11-20T17:27:01,849 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting c88e13696eb846f7a12faf3adcb60d88, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732123618697 2024-11-20T17:27:01,849 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fde4b93322943e6bfe0f022d11c3320, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732123618697 2024-11-20T17:27:01,849 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting a36098c8697c44c085182d37cf172559, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732123620869 2024-11-20T17:27:01,849 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 648108316ecd4c859b81ca8d187d2100, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732123620869 2024-11-20T17:27:01,855 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:01,855 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#B#compaction#456 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:01,856 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/2e60e5b6b6594d578c093e716ec5ea01 is 50, key is test_row_0/B:col10/1732123620872/Put/seqid=0 2024-11-20T17:27:01,858 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120493fc04e5c0b4dc795017f17ef37d205_461e1341891f7389f7e7df762c9abfba store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:01,859 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120493fc04e5c0b4dc795017f17ef37d205_461e1341891f7389f7e7df762c9abfba, store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:01,860 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120493fc04e5c0b4dc795017f17ef37d205_461e1341891f7389f7e7df762c9abfba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:01,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742364_1540 (size=12207) 2024-11-20T17:27:01,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742365_1541 (size=4469) 2024-11-20T17:27:01,866 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#A#compaction#457 average throughput is 2.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:01,867 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/41a8e78a73bd4acc8c94739478e9f093 is 175, key is test_row_0/A:col10/1732123620872/Put/seqid=0 2024-11-20T17:27:01,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742366_1542 (size=31161) 2024-11-20T17:27:01,875 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/41a8e78a73bd4acc8c94739478e9f093 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/41a8e78a73bd4acc8c94739478e9f093 2024-11-20T17:27:01,880 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/A of 461e1341891f7389f7e7df762c9abfba into 41a8e78a73bd4acc8c94739478e9f093(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:27:01,880 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:01,880 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/A, priority=13, startTime=1732123621847; duration=0sec 2024-11-20T17:27:01,880 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:27:01,880 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:A 2024-11-20T17:27:01,881 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:27:01,882 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:27:01,882 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/C is initiating minor compaction (all files) 2024-11-20T17:27:01,882 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/C in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:01,882 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/70a11f6869be4e6c9f1ce9152d0e721e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/fb28edc7e00d43b1a3e2991d2060e9b2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c672cc6d69a7433f8f1029d64ce3ffb2] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=35.3 K 2024-11-20T17:27:01,882 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70a11f6869be4e6c9f1ce9152d0e721e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732123618656 2024-11-20T17:27:01,883 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb28edc7e00d43b1a3e2991d2060e9b2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732123618697 2024-11-20T17:27:01,883 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting c672cc6d69a7433f8f1029d64ce3ffb2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732123620869 2024-11-20T17:27:01,891 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#C#compaction#458 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:01,892 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/d196d1ea2f41405792036c05e926fea8 is 50, key is test_row_0/C:col10/1732123620872/Put/seqid=0 2024-11-20T17:27:01,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742367_1543 (size=12207) 2024-11-20T17:27:01,901 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/d196d1ea2f41405792036c05e926fea8 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/d196d1ea2f41405792036c05e926fea8 2024-11-20T17:27:01,907 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/C of 461e1341891f7389f7e7df762c9abfba into d196d1ea2f41405792036c05e926fea8(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
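The cycle visible above, a memstore flush into .tmp HFiles, a commit of those files into the A, B and C stores, then a minor compaction that merges the three flushed files per store, is the same flush-then-compact sequence that can also be requested by hand through the Admin API. A minimal sketch under that assumption follows; in the test itself these steps are driven by memstore pressure and FlushTableProcedure rather than by this client call, and both Admin methods only queue the work asynchronously on the region server.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushThenCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);   // request that each store's memstore be written out as a new HFile
      admin.compact(table); // request a (minor) compaction of the flushed store files
    }
  }
}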
2024-11-20T17:27:01,907 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:01,907 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/C, priority=13, startTime=1732123621847; duration=0sec 2024-11-20T17:27:01,907 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:01,907 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:C 2024-11-20T17:27:02,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:02,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:27:02,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:02,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:02,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:02,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:02,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:02,112 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:02,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ac4adabe56c84fc8972bd7cd73d819a0_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123622110/Put/seqid=0 2024-11-20T17:27:02,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742368_1544 (size=12154) 2024-11-20T17:27:02,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123682119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123682152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123682152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123682152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123682253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123682255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123682255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123682255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,268 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/2e60e5b6b6594d578c093e716ec5ea01 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/2e60e5b6b6594d578c093e716ec5ea01 2024-11-20T17:27:02,272 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/B of 461e1341891f7389f7e7df762c9abfba into 2e60e5b6b6594d578c093e716ec5ea01(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:27:02,272 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:02,272 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/B, priority=13, startTime=1732123621847; duration=0sec 2024-11-20T17:27:02,272 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:02,272 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:B 2024-11-20T17:27:02,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123682455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123682459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123682459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123682459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,521 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:02,525 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ac4adabe56c84fc8972bd7cd73d819a0_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ac4adabe56c84fc8972bd7cd73d819a0_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:02,526 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/647ba7d3d16f448d97a0e53696aa5b45, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:02,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/647ba7d3d16f448d97a0e53696aa5b45 is 175, key is test_row_0/A:col10/1732123622110/Put/seqid=0 2024-11-20T17:27:02,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742369_1545 (size=30955) 2024-11-20T17:27:02,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-20T17:27:02,697 INFO [Thread-2306 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 151 completed 2024-11-20T17:27:02,698 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush 
TestAcidGuarantees 2024-11-20T17:27:02,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees 2024-11-20T17:27:02,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-20T17:27:02,700 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:27:02,700 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:27:02,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:27:02,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123682756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123682762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123682763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123682763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-20T17:27:02,852 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:02,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-20T17:27:02,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:02,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:02,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:02,853 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:02,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:02,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:02,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:02,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123682890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:02,891 DEBUG [Thread-2296 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., hostname=d514dc944523,44015,1732123455293, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:27:02,930 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/647ba7d3d16f448d97a0e53696aa5b45 2024-11-20T17:27:02,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/d007dc44d83d4ce59d222d0b1345f57d is 50, key is test_row_0/B:col10/1732123622110/Put/seqid=0 2024-11-20T17:27:02,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742370_1546 (size=12001) 2024-11-20T17:27:03,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-20T17:27:03,005 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:03,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-20T17:27:03,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:03,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:03,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:03,005 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:03,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:03,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:03,157 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:03,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-20T17:27:03,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:03,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:03,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:03,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:03,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:03,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:03,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123683259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:03,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:03,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123683265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:03,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:03,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123683265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:03,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:03,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123683266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:03,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-20T17:27:03,309 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:03,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-20T17:27:03,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:03,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:03,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:03,310 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:03,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:03,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:03,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/d007dc44d83d4ce59d222d0b1345f57d 2024-11-20T17:27:03,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/9254c607398948c585a5b395761faa67 is 50, key is test_row_0/C:col10/1732123622110/Put/seqid=0 2024-11-20T17:27:03,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742371_1547 (size=12001) 2024-11-20T17:27:03,461 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:03,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-20T17:27:03,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:03,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:03,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:03,462 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:03,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:03,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:03,614 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:03,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-11-20T17:27:03,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:03,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:03,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:03,615 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:03,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:03,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:03,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/9254c607398948c585a5b395761faa67 2024-11-20T17:27:03,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/647ba7d3d16f448d97a0e53696aa5b45 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/647ba7d3d16f448d97a0e53696aa5b45 2024-11-20T17:27:03,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/647ba7d3d16f448d97a0e53696aa5b45, entries=150, sequenceid=118, filesize=30.2 K 2024-11-20T17:27:03,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/d007dc44d83d4ce59d222d0b1345f57d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/d007dc44d83d4ce59d222d0b1345f57d 2024-11-20T17:27:03,762 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/d007dc44d83d4ce59d222d0b1345f57d, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T17:27:03,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/9254c607398948c585a5b395761faa67 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/9254c607398948c585a5b395761faa67 2024-11-20T17:27:03,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/9254c607398948c585a5b395761faa67, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T17:27:03,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 461e1341891f7389f7e7df762c9abfba in 1655ms, sequenceid=118, compaction requested=false 2024-11-20T17:27:03,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:03,767 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:03,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=154 2024-11-20T17:27:03,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:03,767 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T17:27:03,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:03,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:03,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:03,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:03,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:03,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:03,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d004448d511a47ffb715924c3ae33414_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123622119/Put/seqid=0 2024-11-20T17:27:03,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742372_1548 (size=12204) 2024-11-20T17:27:03,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:03,780 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d004448d511a47ffb715924c3ae33414_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d004448d511a47ffb715924c3ae33414_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:03,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/6cebcbd842c44ed3b0213ebbb6293b26, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:03,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/6cebcbd842c44ed3b0213ebbb6293b26 is 175, key is test_row_0/A:col10/1732123622119/Put/seqid=0 2024-11-20T17:27:03,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742373_1549 (size=31005) 2024-11-20T17:27:03,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-20T17:27:04,190 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/6cebcbd842c44ed3b0213ebbb6293b26 2024-11-20T17:27:04,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/2139bbaf83bf44c5bc3098c2372b0ddf is 50, key is test_row_0/B:col10/1732123622119/Put/seqid=0 2024-11-20T17:27:04,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742374_1550 (size=12051) 2024-11-20T17:27:04,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:04,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:04,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123684318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123684318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123684319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123684319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123684422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123684423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123684423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123684423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,600 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/2139bbaf83bf44c5bc3098c2372b0ddf 2024-11-20T17:27:04,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/074318c71fd54f49bbccb84a4f938b74 is 50, key is test_row_0/C:col10/1732123622119/Put/seqid=0 2024-11-20T17:27:04,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742375_1551 (size=12051) 2024-11-20T17:27:04,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123684624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123684625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123684625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123684626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-20T17:27:04,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123684927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123684928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123684930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:04,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:04,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123684930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,010 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/074318c71fd54f49bbccb84a4f938b74 2024-11-20T17:27:05,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/6cebcbd842c44ed3b0213ebbb6293b26 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/6cebcbd842c44ed3b0213ebbb6293b26 2024-11-20T17:27:05,018 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/6cebcbd842c44ed3b0213ebbb6293b26, entries=150, sequenceid=132, filesize=30.3 K 2024-11-20T17:27:05,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/2139bbaf83bf44c5bc3098c2372b0ddf as 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/2139bbaf83bf44c5bc3098c2372b0ddf 2024-11-20T17:27:05,021 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/2139bbaf83bf44c5bc3098c2372b0ddf, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T17:27:05,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/074318c71fd54f49bbccb84a4f938b74 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/074318c71fd54f49bbccb84a4f938b74 2024-11-20T17:27:05,025 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/074318c71fd54f49bbccb84a4f938b74, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T17:27:05,026 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 461e1341891f7389f7e7df762c9abfba in 1259ms, sequenceid=132, compaction requested=true 2024-11-20T17:27:05,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:05,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:05,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-11-20T17:27:05,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-11-20T17:27:05,028 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-20T17:27:05,028 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3270 sec 2024-11-20T17:27:05,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 2.3310 sec 2024-11-20T17:27:05,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:05,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:27:05,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:05,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:05,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:05,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:05,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:05,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:05,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207f09860073f140b1abdeba24f7994e3e_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123625432/Put/seqid=0 2024-11-20T17:27:05,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:05,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123685440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742376_1552 (size=14794) 2024-11-20T17:27:05,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:05,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123685442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,445 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:05,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:05,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123685442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:05,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123685443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,448 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207f09860073f140b1abdeba24f7994e3e_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207f09860073f140b1abdeba24f7994e3e_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:05,449 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/11321aae73024503be8c40774403b5b4, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:05,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/11321aae73024503be8c40774403b5b4 is 175, key is test_row_0/A:col10/1732123625432/Put/seqid=0 2024-11-20T17:27:05,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742377_1553 (size=39749) 2024-11-20T17:27:05,453 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/11321aae73024503be8c40774403b5b4 2024-11-20T17:27:05,459 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/36cde178663440f28b2d8bdc4f3bd241 is 50, key is test_row_0/B:col10/1732123625432/Put/seqid=0 2024-11-20T17:27:05,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742378_1554 (size=12151) 2024-11-20T17:27:05,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:05,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123685544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:05,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123685545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:05,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123685546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:05,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123685546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:05,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123685747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:05,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123685748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:05,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123685749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:05,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123685749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:05,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/36cde178663440f28b2d8bdc4f3bd241 2024-11-20T17:27:05,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/f3d3a86fe63c4cffa80131912514f3f0 is 50, key is test_row_0/C:col10/1732123625432/Put/seqid=0 2024-11-20T17:27:05,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742379_1555 (size=12151) 2024-11-20T17:27:06,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123686052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123686052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123686052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123686053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/f3d3a86fe63c4cffa80131912514f3f0 2024-11-20T17:27:06,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/11321aae73024503be8c40774403b5b4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/11321aae73024503be8c40774403b5b4 2024-11-20T17:27:06,287 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/11321aae73024503be8c40774403b5b4, entries=200, sequenceid=157, filesize=38.8 K 2024-11-20T17:27:06,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/36cde178663440f28b2d8bdc4f3bd241 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/36cde178663440f28b2d8bdc4f3bd241 2024-11-20T17:27:06,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/36cde178663440f28b2d8bdc4f3bd241, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T17:27:06,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/f3d3a86fe63c4cffa80131912514f3f0 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f3d3a86fe63c4cffa80131912514f3f0 2024-11-20T17:27:06,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f3d3a86fe63c4cffa80131912514f3f0, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T17:27:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,295 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 461e1341891f7389f7e7df762c9abfba in 862ms, sequenceid=157, compaction requested=true 2024-11-20T17:27:06,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:06,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,296 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:27:06,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:27:06,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:06,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:27:06,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:06,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:27:06,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:27:06,296 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:27:06,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,297 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132870 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:27:06,297 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:27:06,298 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/A is initiating minor compaction (all files) 2024-11-20T17:27:06,298 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/B is initiating minor compaction (all files) 2024-11-20T17:27:06,298 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/A in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:06,298 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/B in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:06,298 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/41a8e78a73bd4acc8c94739478e9f093, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/647ba7d3d16f448d97a0e53696aa5b45, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/6cebcbd842c44ed3b0213ebbb6293b26, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/11321aae73024503be8c40774403b5b4] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=129.8 K 2024-11-20T17:27:06,298 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/2e60e5b6b6594d578c093e716ec5ea01, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/d007dc44d83d4ce59d222d0b1345f57d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/2139bbaf83bf44c5bc3098c2372b0ddf, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/36cde178663440f28b2d8bdc4f3bd241] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=47.3 K 2024-11-20T17:27:06,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,298 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:06,298 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/41a8e78a73bd4acc8c94739478e9f093, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/647ba7d3d16f448d97a0e53696aa5b45, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/6cebcbd842c44ed3b0213ebbb6293b26, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/11321aae73024503be8c40774403b5b4] 2024-11-20T17:27:06,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,298 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 41a8e78a73bd4acc8c94739478e9f093, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732123620869 2024-11-20T17:27:06,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,298 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e60e5b6b6594d578c093e716ec5ea01, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732123620869 2024-11-20T17:27:06,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,299 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 647ba7d3d16f448d97a0e53696aa5b45, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732123620996 2024-11-20T17:27:06,299 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting d007dc44d83d4ce59d222d0b1345f57d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732123620996 2024-11-20T17:27:06,299 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 6cebcbd842c44ed3b0213ebbb6293b26, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732123622117 2024-11-20T17:27:06,299 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2139bbaf83bf44c5bc3098c2372b0ddf, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732123622117 2024-11-20T17:27:06,299 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 11321aae73024503be8c40774403b5b4, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732123624313 2024-11-20T17:27:06,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,299 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36cde178663440f28b2d8bdc4f3bd241, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732123624318 2024-11-20T17:27:06,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,306 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:06,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,308 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#B#compaction#468 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:06,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,308 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411204565899f276e4d91b48ca448d6251809_461e1341891f7389f7e7df762c9abfba store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:06,308 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/a4ad9e5cb44441d9b58216cd98914f07 is 50, key is test_row_0/B:col10/1732123625432/Put/seqid=0 2024-11-20T17:27:06,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,311 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411204565899f276e4d91b48ca448d6251809_461e1341891f7389f7e7df762c9abfba, store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:06,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,311 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204565899f276e4d91b48ca448d6251809_461e1341891f7389f7e7df762c9abfba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:06,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742380_1556 (size=12493) 2024-11-20T17:27:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742381_1557 (size=4469) 2024-11-20T17:27:06,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T17:27:06,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry from storefiletracker.StoreFileTrackerFactory(122) repeats continuously on RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=44015) from 2024-11-20T17:27:06,471 through 2024-11-20T17:27:06,554 ...]
2024-11-20T17:27:06,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:06,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T17:27:06,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:06,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:06,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:06,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:06,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:06,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:06,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202ed1f4d6769f4c08a6b10cebf6cf9473_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123626558/Put/seqid=0 2024-11-20T17:27:06,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742382_1558 (size=12304) 2024-11-20T17:27:06,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,570 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,573 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202ed1f4d6769f4c08a6b10cebf6cf9473_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202ed1f4d6769f4c08a6b10cebf6cf9473_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:06,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,574 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/8b844c58ef794ac2bfdea4556e5ef5d4, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:06,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/8b844c58ef794ac2bfdea4556e5ef5d4 is 175, key is test_row_0/A:col10/1732123626558/Put/seqid=0 2024-11-20T17:27:06,575 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,579 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742383_1559 (size=31101) 2024-11-20T17:27:06,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,580 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=169, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/8b844c58ef794ac2bfdea4556e5ef5d4 2024-11-20T17:27:06,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
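
[Editor's illustrative aside, not part of the captured log.] The flush entries above show the "A" family of TestAcidGuarantees writing both a MOB file under mobdir/ and an ordinary store file under .tmp/A/, which is the behavior of a MOB-enabled column family. A minimal sketch of declaring such a family through the public HBase client API follows; the table and family names match the log, but the MOB threshold value is an assumption chosen only for illustration.

```java
// Illustrative sketch only: how a MOB-enabled column family like the "A" family
// flushed above can be declared through the public HBase client API.
// The 100 KB threshold is an assumed value, not taken from this test.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Values larger than the MOB threshold are written to files under mobdir/,
      // while a reference cell stays in the regular store file (as in the log above).
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100 * 1024L) // assumed threshold; choose per workload
          .build());
      admin.createTable(table.build());
      // admin.flush(TableName.valueOf("TestAcidGuarantees")) would trigger the kind of
      // FLUSH procedure that appears later in this log.
    }
  }
}
```
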
2024-11-20T17:27:06,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:06,587 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/0284923f57c140d5bb17dfae965d9b31 is 50, key is test_row_0/B:col10/1732123626558/Put/seqid=0 2024-11-20T17:27:06,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742384_1560 (size=12147) 2024-11-20T17:27:06,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123686598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123686600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123686603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123686604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123686705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123686705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123686708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123686708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,717 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/a4ad9e5cb44441d9b58216cd98914f07 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/a4ad9e5cb44441d9b58216cd98914f07 2024-11-20T17:27:06,717 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#A#compaction#469 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:06,718 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/5a3c9fb6d60c4cf5ac159c1213d75169 is 175, key is test_row_0/A:col10/1732123625432/Put/seqid=0 2024-11-20T17:27:06,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742385_1561 (size=31447) 2024-11-20T17:27:06,722 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/B of 461e1341891f7389f7e7df762c9abfba into a4ad9e5cb44441d9b58216cd98914f07(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
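
[Editor's illustrative aside, not part of the captured log.] The WARN/DEBUG entries above show mutations being rejected with RegionTooBusyException once the region's memstore passes its blocking limit (512 K here, a deliberately small value for this test; in a default setup the limit is roughly hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier). The exception is retriable, and the client-side retry entries later in the log show RpcRetryingCallerImpl re-sending the put. A minimal sketch of a writer that leans on those built-in retries follows; the retry count and pause are assumed example values, not this test's settings.

```java
// Illustrative sketch only: a writer that tolerates the RegionTooBusyException
// responses seen above by tuning the client's built-in retry behavior.
// Retry count and pause are assumed example values, not this test's configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // RegionTooBusyException is retriable: the client's retrying caller backs off
    // and re-sends until the flush drains the memstore or retries run out.
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 16); // "hbase.client.retries.number"
    conf.setLong(HConstants.HBASE_CLIENT_PAUSE, 100L);       // "hbase.client.pause", in ms

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // only throws after all client retries are exhausted
    }
  }
}
```
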
2024-11-20T17:27:06,722 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:06,722 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/B, priority=12, startTime=1732123626296; duration=0sec 2024-11-20T17:27:06,722 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:27:06,722 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:B 2024-11-20T17:27:06,722 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:27:06,723 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:27:06,723 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/C is initiating minor compaction (all files) 2024-11-20T17:27:06,723 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/C in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:06,724 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/d196d1ea2f41405792036c05e926fea8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/9254c607398948c585a5b395761faa67, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/074318c71fd54f49bbccb84a4f938b74, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f3d3a86fe63c4cffa80131912514f3f0] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=47.3 K 2024-11-20T17:27:06,724 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting d196d1ea2f41405792036c05e926fea8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732123620869 2024-11-20T17:27:06,724 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9254c607398948c585a5b395761faa67, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732123620996 2024-11-20T17:27:06,725 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 074318c71fd54f49bbccb84a4f938b74, keycount=150, bloomtype=ROW, size=11.8 K, 
encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732123622117 2024-11-20T17:27:06,725 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3d3a86fe63c4cffa80131912514f3f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732123624318 2024-11-20T17:27:06,732 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#C#compaction#472 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:06,733 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/70f012b394df499c93a0c77eb4425d4a is 50, key is test_row_0/C:col10/1732123625432/Put/seqid=0 2024-11-20T17:27:06,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742386_1562 (size=12493) 2024-11-20T17:27:06,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-11-20T17:27:06,804 INFO [Thread-2306 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-11-20T17:27:06,805 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:27:06,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-11-20T17:27:06,807 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:27:06,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T17:27:06,807 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:27:06,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:27:06,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44734 deadline: 1732123686894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,896 DEBUG [Thread-2296 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., hostname=d514dc944523,44015,1732123455293, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T17:27:06,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T17:27:06,910 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123686908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123686908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123686911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:06,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123686911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:06,959 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:06,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T17:27:06,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:06,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:06,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:06,960 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:06,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:06,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:06,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/0284923f57c140d5bb17dfae965d9b31 2024-11-20T17:27:07,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/ef3371d00d584728b7238c938670744f is 50, key is test_row_0/C:col10/1732123626558/Put/seqid=0 2024-11-20T17:27:07,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742387_1563 (size=9757) 2024-11-20T17:27:07,004 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/ef3371d00d584728b7238c938670744f 2024-11-20T17:27:07,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/8b844c58ef794ac2bfdea4556e5ef5d4 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/8b844c58ef794ac2bfdea4556e5ef5d4 2024-11-20T17:27:07,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/8b844c58ef794ac2bfdea4556e5ef5d4, entries=150, sequenceid=169, filesize=30.4 K 2024-11-20T17:27:07,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/0284923f57c140d5bb17dfae965d9b31 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/0284923f57c140d5bb17dfae965d9b31 2024-11-20T17:27:07,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/0284923f57c140d5bb17dfae965d9b31, entries=150, sequenceid=169, filesize=11.9 K 
2024-11-20T17:27:07,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/ef3371d00d584728b7238c938670744f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/ef3371d00d584728b7238c938670744f 2024-11-20T17:27:07,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/ef3371d00d584728b7238c938670744f, entries=100, sequenceid=169, filesize=9.5 K 2024-11-20T17:27:07,020 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 461e1341891f7389f7e7df762c9abfba in 461ms, sequenceid=169, compaction requested=false 2024-11-20T17:27:07,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:07,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T17:27:07,112 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:07,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-11-20T17:27:07,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:07,113 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T17:27:07,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:07,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:07,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:07,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:07,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:07,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:07,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bea376097c9d47af9c095a7720bfe84a_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123626597/Put/seqid=0 2024-11-20T17:27:07,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742388_1564 (size=12304) 2024-11-20T17:27:07,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:07,126 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/5a3c9fb6d60c4cf5ac159c1213d75169 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/5a3c9fb6d60c4cf5ac159c1213d75169 2024-11-20T17:27:07,126 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bea376097c9d47af9c095a7720bfe84a_461e1341891f7389f7e7df762c9abfba to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bea376097c9d47af9c095a7720bfe84a_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:07,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/15f55ad203ed432e82686d9884cbe73b, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:07,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/15f55ad203ed432e82686d9884cbe73b is 175, key is test_row_0/A:col10/1732123626597/Put/seqid=0 2024-11-20T17:27:07,130 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/A of 461e1341891f7389f7e7df762c9abfba into 5a3c9fb6d60c4cf5ac159c1213d75169(size=30.7 K), total size for store is 61.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:27:07,130 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:07,130 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/A, priority=12, startTime=1732123626295; duration=0sec 2024-11-20T17:27:07,131 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:07,131 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:A 2024-11-20T17:27:07,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742389_1565 (size=31105) 2024-11-20T17:27:07,141 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/70f012b394df499c93a0c77eb4425d4a as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/70f012b394df499c93a0c77eb4425d4a 2024-11-20T17:27:07,146 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/C of 461e1341891f7389f7e7df762c9abfba into 70f012b394df499c93a0c77eb4425d4a(size=12.2 K), total size for store is 21.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:27:07,146 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:07,146 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/C, priority=12, startTime=1732123626296; duration=0sec 2024-11-20T17:27:07,146 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:07,146 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:C 2024-11-20T17:27:07,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:07,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:07,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123687220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123687222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123687222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123687223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123687323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123687324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123687326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123687326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T17:27:07,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123687526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123687527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123687529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123687529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,537 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=194, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/15f55ad203ed432e82686d9884cbe73b 2024-11-20T17:27:07,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/4c3a2e8b5fbb48e79314b6e838a9b855 is 50, key is test_row_0/B:col10/1732123626597/Put/seqid=0 2024-11-20T17:27:07,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742390_1566 (size=12151) 2024-11-20T17:27:07,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123687828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123687830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123687831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:07,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123687833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:07,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T17:27:07,947 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/4c3a2e8b5fbb48e79314b6e838a9b855 2024-11-20T17:27:07,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/db609151561242758190b1c93dd24dfd is 50, key is test_row_0/C:col10/1732123626597/Put/seqid=0 2024-11-20T17:27:07,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742391_1567 (size=12151) 2024-11-20T17:27:07,963 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/db609151561242758190b1c93dd24dfd 2024-11-20T17:27:07,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/15f55ad203ed432e82686d9884cbe73b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/15f55ad203ed432e82686d9884cbe73b 2024-11-20T17:27:07,969 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/15f55ad203ed432e82686d9884cbe73b, entries=150, sequenceid=194, filesize=30.4 K 2024-11-20T17:27:07,970 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/4c3a2e8b5fbb48e79314b6e838a9b855 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/4c3a2e8b5fbb48e79314b6e838a9b855 2024-11-20T17:27:07,973 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/4c3a2e8b5fbb48e79314b6e838a9b855, entries=150, sequenceid=194, filesize=11.9 K 2024-11-20T17:27:07,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/db609151561242758190b1c93dd24dfd as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/db609151561242758190b1c93dd24dfd 2024-11-20T17:27:07,976 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/db609151561242758190b1c93dd24dfd, entries=150, sequenceid=194, filesize=11.9 K 2024-11-20T17:27:07,977 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 461e1341891f7389f7e7df762c9abfba in 864ms, sequenceid=194, compaction requested=true 2024-11-20T17:27:07,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:07,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
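The surrounding records show the regionserver rejecting Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K") while region 461e1341891f7389f7e7df762c9abfba is being flushed, and the master driving FlushTableProcedure pid=155/157 against TestAcidGuarantees. The sketch below is an illustrative client-side view of those two operations, not code from the test itself: it assumes a reachable cluster with hbase-site.xml on the classpath and that the table and family "A" already exist. In stock HBase the blocking limit checked by HRegion.checkResources is the server-side product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so the 512 K figure implies the test runs with a deliberately small flush size; the class name and literal values below are assumptions for illustration only.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Minimal sketch (not the TestAcidGuarantees source): a client writing to the table
 * seen in this log, plus the admin-requested flush that procedures pid=155/157 perform.
 * Assumes cluster configuration on the classpath and an existing table with family "A".
 */
public class MemstoreBackpressureSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"));
         Admin admin = connection.getAdmin()) {

      // A single mutation like the "Mutate size: 4.7 K" calls in the log. While the
      // region's memstore is over its blocking limit (server-side
      // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier),
      // the server answers with RegionTooBusyException and the client retries within
      // its configured retry budget.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put);

      // Client-side equivalent of the master operation the log records as
      // "Operation: FLUSH, Table Name: default:TestAcidGuarantees": request a table
      // flush so the memstore drains and the region stops rejecting writes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
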
2024-11-20T17:27:07,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-11-20T17:27:07,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-11-20T17:27:07,979 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-20T17:27:07,979 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1710 sec 2024-11-20T17:27:07,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 1.1750 sec 2024-11-20T17:27:08,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:08,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:27:08,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:08,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:08,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:08,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:08,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:08,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:08,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112086fc7db12d0e4425add4c2a70bd3aca9_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123627221/Put/seqid=0 2024-11-20T17:27:08,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742392_1568 (size=12304) 2024-11-20T17:27:08,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123688352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123688353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123688354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123688354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123688456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123688456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123688458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123688458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123688659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123688659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123688660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123688661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,744 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:08,747 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112086fc7db12d0e4425add4c2a70bd3aca9_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112086fc7db12d0e4425add4c2a70bd3aca9_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:08,747 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/08890484f78740a29941a43219a164ff, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:08,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/08890484f78740a29941a43219a164ff is 175, key is test_row_0/A:col10/1732123627221/Put/seqid=0 2024-11-20T17:27:08,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742393_1569 (size=31105) 2024-11-20T17:27:08,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-20T17:27:08,911 INFO [Thread-2306 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-11-20T17:27:08,912 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:27:08,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-11-20T17:27:08,914 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:27:08,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:27:08,914 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:27:08,915 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:27:08,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123688961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123688962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123688962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:08,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:08,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123688965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:09,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:27:09,066 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:09,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:27:09,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:09,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,067 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,152 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/08890484f78740a29941a43219a164ff 2024-11-20T17:27:09,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/e23948382fb043e79157be12f48d39e1 is 50, key is test_row_0/B:col10/1732123627221/Put/seqid=0 2024-11-20T17:27:09,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742394_1570 (size=12151) 2024-11-20T17:27:09,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:27:09,218 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:09,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:27:09,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
as already flushing 2024-11-20T17:27:09,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,371 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:09,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:27:09,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:09,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,372 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:09,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123689465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:09,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:09,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123689466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:09,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:09,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123689466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:09,471 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:09,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123689469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:09,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:27:09,524 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:09,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:27:09,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:09,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:09,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,569 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/e23948382fb043e79157be12f48d39e1 2024-11-20T17:27:09,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/a8c8f6a5d5df4f099d31661970cbdabe is 50, key is test_row_0/C:col10/1732123627221/Put/seqid=0 2024-11-20T17:27:09,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742395_1571 (size=12151) 2024-11-20T17:27:09,676 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:09,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:27:09,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:09,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,677 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:09,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,829 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:09,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:27:09,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:09,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:09,980 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/a8c8f6a5d5df4f099d31661970cbdabe 2024-11-20T17:27:09,981 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:09,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:27:09,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:09,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,982 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:09,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:09,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/08890484f78740a29941a43219a164ff as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/08890484f78740a29941a43219a164ff 2024-11-20T17:27:09,987 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/08890484f78740a29941a43219a164ff, entries=150, sequenceid=210, filesize=30.4 K 2024-11-20T17:27:09,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/e23948382fb043e79157be12f48d39e1 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/e23948382fb043e79157be12f48d39e1 2024-11-20T17:27:09,991 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/e23948382fb043e79157be12f48d39e1, entries=150, sequenceid=210, filesize=11.9 K 2024-11-20T17:27:09,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/a8c8f6a5d5df4f099d31661970cbdabe as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/a8c8f6a5d5df4f099d31661970cbdabe 2024-11-20T17:27:09,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/a8c8f6a5d5df4f099d31661970cbdabe, entries=150, sequenceid=210, filesize=11.9 K 2024-11-20T17:27:09,995 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 461e1341891f7389f7e7df762c9abfba in 1662ms, sequenceid=210, compaction requested=true 2024-11-20T17:27:09,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:09,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:27:09,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:09,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:27:09,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): 
Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:09,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:27:09,995 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:27:09,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:27:09,995 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:27:09,996 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124758 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:27:09,996 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48942 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:27:09,997 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/A is initiating minor compaction (all files) 2024-11-20T17:27:09,997 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/B is initiating minor compaction (all files) 2024-11-20T17:27:09,997 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/A in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,997 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/B in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:09,997 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/5a3c9fb6d60c4cf5ac159c1213d75169, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/8b844c58ef794ac2bfdea4556e5ef5d4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/15f55ad203ed432e82686d9884cbe73b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/08890484f78740a29941a43219a164ff] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=121.8 K 2024-11-20T17:27:09,997 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/a4ad9e5cb44441d9b58216cd98914f07, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/0284923f57c140d5bb17dfae965d9b31, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/4c3a2e8b5fbb48e79314b6e838a9b855, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/e23948382fb043e79157be12f48d39e1] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=47.8 K 2024-11-20T17:27:09,997 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:09,997 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/5a3c9fb6d60c4cf5ac159c1213d75169, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/8b844c58ef794ac2bfdea4556e5ef5d4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/15f55ad203ed432e82686d9884cbe73b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/08890484f78740a29941a43219a164ff] 2024-11-20T17:27:09,997 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a3c9fb6d60c4cf5ac159c1213d75169, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732123624318 2024-11-20T17:27:09,997 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting a4ad9e5cb44441d9b58216cd98914f07, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732123624318 2024-11-20T17:27:09,998 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 0284923f57c140d5bb17dfae965d9b31, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732123625440 2024-11-20T17:27:09,998 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b844c58ef794ac2bfdea4556e5ef5d4, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732123625440 2024-11-20T17:27:09,998 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c3a2e8b5fbb48e79314b6e838a9b855, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732123626597 2024-11-20T17:27:09,998 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15f55ad203ed432e82686d9884cbe73b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732123626597 2024-11-20T17:27:09,998 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08890484f78740a29941a43219a164ff, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732123627221 2024-11-20T17:27:09,998 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting e23948382fb043e79157be12f48d39e1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732123627221 2024-11-20T17:27:10,005 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:10,006 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#B#compaction#480 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:10,006 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/213aeaf23ecd46adbd8f7104f47d6262 is 50, key is test_row_0/B:col10/1732123627221/Put/seqid=0 2024-11-20T17:27:10,006 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411208c6ae16e1f204ac88a1c943d5bc54c4c_461e1341891f7389f7e7df762c9abfba store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:10,009 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411208c6ae16e1f204ac88a1c943d5bc54c4c_461e1341891f7389f7e7df762c9abfba, store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:10,009 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208c6ae16e1f204ac88a1c943d5bc54c4c_461e1341891f7389f7e7df762c9abfba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:10,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742397_1573 (size=4469) 2024-11-20T17:27:10,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742396_1572 (size=12629) 2024-11-20T17:27:10,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:27:10,134 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:10,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-20T17:27:10,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:10,134 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T17:27:10,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:10,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:10,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:10,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:10,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:10,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:10,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203117f867f36647f281039074a1f7297f_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123628349/Put/seqid=0 2024-11-20T17:27:10,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742398_1574 (size=12304) 2024-11-20T17:27:10,417 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#A#compaction#481 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:10,418 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/c5a6e7fa2206400f91dba53f20aa5c75 is 175, key is test_row_0/A:col10/1732123627221/Put/seqid=0 2024-11-20T17:27:10,421 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/213aeaf23ecd46adbd8f7104f47d6262 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/213aeaf23ecd46adbd8f7104f47d6262 2024-11-20T17:27:10,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742399_1575 (size=31583) 2024-11-20T17:27:10,426 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/B of 461e1341891f7389f7e7df762c9abfba into 213aeaf23ecd46adbd8f7104f47d6262(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:27:10,426 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:10,426 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/B, priority=12, startTime=1732123629995; duration=0sec 2024-11-20T17:27:10,427 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/c5a6e7fa2206400f91dba53f20aa5c75 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c5a6e7fa2206400f91dba53f20aa5c75 2024-11-20T17:27:10,427 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:27:10,427 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:B 2024-11-20T17:27:10,427 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T17:27:10,429 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46552 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T17:27:10,430 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/C is initiating minor compaction (all files) 2024-11-20T17:27:10,430 INFO 
[RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/C in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:10,430 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/70f012b394df499c93a0c77eb4425d4a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/ef3371d00d584728b7238c938670744f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/db609151561242758190b1c93dd24dfd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/a8c8f6a5d5df4f099d31661970cbdabe] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=45.5 K 2024-11-20T17:27:10,430 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 70f012b394df499c93a0c77eb4425d4a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732123624318 2024-11-20T17:27:10,430 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting ef3371d00d584728b7238c938670744f, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732123625441 2024-11-20T17:27:10,431 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting db609151561242758190b1c93dd24dfd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732123626597 2024-11-20T17:27:10,431 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting a8c8f6a5d5df4f099d31661970cbdabe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732123627221 2024-11-20T17:27:10,432 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/A of 461e1341891f7389f7e7df762c9abfba into c5a6e7fa2206400f91dba53f20aa5c75(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:27:10,432 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:10,433 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/A, priority=12, startTime=1732123629995; duration=0sec 2024-11-20T17:27:10,433 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:10,433 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:A 2024-11-20T17:27:10,438 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#C#compaction#483 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:10,439 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/9a6a950350eb4dbf947f51821f76416c is 50, key is test_row_0/C:col10/1732123627221/Put/seqid=0 2024-11-20T17:27:10,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742400_1576 (size=12629) 2024-11-20T17:27:10,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:10,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:10,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:10,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123690477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:10,480 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:10,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123690478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:10,480 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:10,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123690478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:10,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:10,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123690479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:10,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:10,549 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203117f867f36647f281039074a1f7297f_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203117f867f36647f281039074a1f7297f_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:10,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/4ec4072c860d4da99f96a9d227681f96, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:10,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/4ec4072c860d4da99f96a9d227681f96 is 175, key is test_row_0/A:col10/1732123628349/Put/seqid=0 2024-11-20T17:27:10,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742401_1577 (size=31105) 2024-11-20T17:27:10,558 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=233, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/4ec4072c860d4da99f96a9d227681f96 2024-11-20T17:27:10,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/b3ea5a58cc144676a0a53df54d81c8c7 is 50, key is test_row_0/B:col10/1732123628349/Put/seqid=0 2024-11-20T17:27:10,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742402_1578 (size=12151) 2024-11-20T17:27:10,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:10,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123690580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:10,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:10,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123690580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:10,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:10,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123690581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:10,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:10,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123690582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:10,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:10,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123690782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:10,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:10,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123690783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:10,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:10,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123690784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:10,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:10,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123690784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:10,862 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/9a6a950350eb4dbf947f51821f76416c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/9a6a950350eb4dbf947f51821f76416c 2024-11-20T17:27:10,866 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/C of 461e1341891f7389f7e7df762c9abfba into 9a6a950350eb4dbf947f51821f76416c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T17:27:10,866 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:10,866 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/C, priority=12, startTime=1732123629995; duration=0sec 2024-11-20T17:27:10,866 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:10,866 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:C 2024-11-20T17:27:10,967 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/b3ea5a58cc144676a0a53df54d81c8c7 2024-11-20T17:27:10,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/f9080c5f8d994833b75b137c0d99be17 is 50, key is test_row_0/C:col10/1732123628349/Put/seqid=0 2024-11-20T17:27:10,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742403_1579 (size=12151) 2024-11-20T17:27:10,977 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/f9080c5f8d994833b75b137c0d99be17 2024-11-20T17:27:10,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/4ec4072c860d4da99f96a9d227681f96 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/4ec4072c860d4da99f96a9d227681f96 2024-11-20T17:27:10,989 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/4ec4072c860d4da99f96a9d227681f96, entries=150, sequenceid=233, filesize=30.4 K 2024-11-20T17:27:10,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/b3ea5a58cc144676a0a53df54d81c8c7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/b3ea5a58cc144676a0a53df54d81c8c7 2024-11-20T17:27:10,993 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/b3ea5a58cc144676a0a53df54d81c8c7, entries=150, sequenceid=233, filesize=11.9 K 2024-11-20T17:27:10,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/f9080c5f8d994833b75b137c0d99be17 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f9080c5f8d994833b75b137c0d99be17 2024-11-20T17:27:10,997 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f9080c5f8d994833b75b137c0d99be17, entries=150, sequenceid=233, filesize=11.9 K 2024-11-20T17:27:10,998 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 461e1341891f7389f7e7df762c9abfba in 864ms, sequenceid=233, compaction requested=false 2024-11-20T17:27:10,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:10,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:10,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-11-20T17:27:10,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-11-20T17:27:11,000 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-11-20T17:27:11,000 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0840 sec 2024-11-20T17:27:11,001 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 2.0890 sec 2024-11-20T17:27:11,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-20T17:27:11,018 INFO [Thread-2306 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-11-20T17:27:11,019 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:27:11,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-20T17:27:11,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:27:11,021 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:27:11,021 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:27:11,021 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:27:11,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:11,088 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T17:27:11,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:11,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:11,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:11,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-20T17:27:11,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:11,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:11,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aa46eb11841c4366ac0aeff01401f251_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123630477/Put/seqid=0 2024-11-20T17:27:11,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742404_1580 (size=17284) 2024-11-20T17:27:11,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:27:11,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123691131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123691131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123691132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123691132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,172 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:11,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:27:11,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:11,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:11,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:11,173 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123691235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123691235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123691235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123691235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:27:11,325 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:11,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:27:11,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:11,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:11,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:11,326 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:11,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123691439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123691439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123691439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123691439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,477 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:11,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:27:11,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:11,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:11,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:11,478 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,499 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:11,502 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120aa46eb11841c4366ac0aeff01401f251_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aa46eb11841c4366ac0aeff01401f251_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:11,503 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/655aed42ba9743fbb7d8f7f26f014f6b, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:11,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/655aed42ba9743fbb7d8f7f26f014f6b is 175, key is test_row_0/A:col10/1732123630477/Put/seqid=0 2024-11-20T17:27:11,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742405_1581 (size=48389) 2024-11-20T17:27:11,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:27:11,630 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:11,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:27:11,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:11,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
as already flushing 2024-11-20T17:27:11,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:11,631 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123691740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123691741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123691742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:11,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123691742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:11,782 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:11,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:27:11,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:11,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:11,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:11,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:11,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,908 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=251, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/655aed42ba9743fbb7d8f7f26f014f6b 2024-11-20T17:27:11,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/5acb683a66d3415cb43d1a268c1d9aaa is 50, key is test_row_0/B:col10/1732123630477/Put/seqid=0 2024-11-20T17:27:11,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742406_1582 (size=12151) 2024-11-20T17:27:11,935 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:11,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:27:11,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:11,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:11,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:11,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:11,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:11,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,087 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:12,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:27:12,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:12,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,088 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T17:27:12,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:27:12,240 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:12,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:27:12,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:12,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:12,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123692245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:12,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:12,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123692245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:12,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:12,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123692246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:12,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:12,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123692246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:12,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/5acb683a66d3415cb43d1a268c1d9aaa 2024-11-20T17:27:12,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/c71480e181c54534aea82dce3038fdd5 is 50, key is test_row_0/C:col10/1732123630477/Put/seqid=0 2024-11-20T17:27:12,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742407_1583 (size=12151) 2024-11-20T17:27:12,392 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:12,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:27:12,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:12,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,393 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,545 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:12,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:27:12,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:12,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,546 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,698 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:12,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:27:12,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,699 ERROR [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T17:27:12,729 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/c71480e181c54534aea82dce3038fdd5 2024-11-20T17:27:12,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/655aed42ba9743fbb7d8f7f26f014f6b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/655aed42ba9743fbb7d8f7f26f014f6b 2024-11-20T17:27:12,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/655aed42ba9743fbb7d8f7f26f014f6b, entries=250, sequenceid=251, filesize=47.3 K 2024-11-20T17:27:12,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/5acb683a66d3415cb43d1a268c1d9aaa as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/5acb683a66d3415cb43d1a268c1d9aaa 2024-11-20T17:27:12,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/5acb683a66d3415cb43d1a268c1d9aaa, entries=150, 
sequenceid=251, filesize=11.9 K 2024-11-20T17:27:12,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/c71480e181c54534aea82dce3038fdd5 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c71480e181c54534aea82dce3038fdd5 2024-11-20T17:27:12,751 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c71480e181c54534aea82dce3038fdd5, entries=150, sequenceid=251, filesize=11.9 K 2024-11-20T17:27:12,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 461e1341891f7389f7e7df762c9abfba in 1665ms, sequenceid=251, compaction requested=true 2024-11-20T17:27:12,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:12,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T17:27:12,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:12,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T17:27:12,752 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:27:12,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:12,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 461e1341891f7389f7e7df762c9abfba:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T17:27:12,752 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:27:12,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:27:12,753 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:27:12,753 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111077 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:27:12,753 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] 
regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/B is initiating minor compaction (all files) 2024-11-20T17:27:12,753 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/A is initiating minor compaction (all files) 2024-11-20T17:27:12,753 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/A in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,753 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/B in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,753 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c5a6e7fa2206400f91dba53f20aa5c75, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/4ec4072c860d4da99f96a9d227681f96, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/655aed42ba9743fbb7d8f7f26f014f6b] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=108.5 K 2024-11-20T17:27:12,753 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/213aeaf23ecd46adbd8f7104f47d6262, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/b3ea5a58cc144676a0a53df54d81c8c7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/5acb683a66d3415cb43d1a268c1d9aaa] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=36.1 K 2024-11-20T17:27:12,753 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:12,753 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
files: [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c5a6e7fa2206400f91dba53f20aa5c75, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/4ec4072c860d4da99f96a9d227681f96, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/655aed42ba9743fbb7d8f7f26f014f6b] 2024-11-20T17:27:12,756 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 213aeaf23ecd46adbd8f7104f47d6262, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732123627221 2024-11-20T17:27:12,756 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5a6e7fa2206400f91dba53f20aa5c75, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732123627221 2024-11-20T17:27:12,756 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ec4072c860d4da99f96a9d227681f96, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732123628349 2024-11-20T17:27:12,756 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting b3ea5a58cc144676a0a53df54d81c8c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732123628349 2024-11-20T17:27:12,757 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] compactions.Compactor(224): Compacting 655aed42ba9743fbb7d8f7f26f014f6b, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732123630474 2024-11-20T17:27:12,757 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 5acb683a66d3415cb43d1a268c1d9aaa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732123630477 2024-11-20T17:27:12,762 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:12,762 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#B#compaction#489 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:12,763 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/cb7db616540b40878432c2a0155ede75 is 50, key is test_row_0/B:col10/1732123630477/Put/seqid=0 2024-11-20T17:27:12,765 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120a4f8574f16db498f8272d49173ab0e5d_461e1341891f7389f7e7df762c9abfba store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:12,767 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120a4f8574f16db498f8272d49173ab0e5d_461e1341891f7389f7e7df762c9abfba, store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:12,767 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a4f8574f16db498f8272d49173ab0e5d_461e1341891f7389f7e7df762c9abfba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:12,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742409_1585 (size=4469) 2024-11-20T17:27:12,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742408_1584 (size=12731) 2024-11-20T17:27:12,781 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#A#compaction#490 average throughput is 1.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:12,781 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/8472e4e3e1c24ee98e7872921d5b3415 is 175, key is test_row_0/A:col10/1732123630477/Put/seqid=0 2024-11-20T17:27:12,784 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/cb7db616540b40878432c2a0155ede75 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/cb7db616540b40878432c2a0155ede75 2024-11-20T17:27:12,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742410_1586 (size=31685) 2024-11-20T17:27:12,789 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/B of 461e1341891f7389f7e7df762c9abfba into cb7db616540b40878432c2a0155ede75(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:27:12,789 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:12,789 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/B, priority=13, startTime=1732123632752; duration=0sec 2024-11-20T17:27:12,789 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T17:27:12,789 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:B 2024-11-20T17:27:12,789 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T17:27:12,790 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T17:27:12,790 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1540): 461e1341891f7389f7e7df762c9abfba/C is initiating minor compaction (all files) 2024-11-20T17:27:12,790 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 461e1341891f7389f7e7df762c9abfba/C in TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:12,790 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/9a6a950350eb4dbf947f51821f76416c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f9080c5f8d994833b75b137c0d99be17, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c71480e181c54534aea82dce3038fdd5] into tmpdir=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp, totalSize=36.1 K 2024-11-20T17:27:12,790 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a6a950350eb4dbf947f51821f76416c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732123627221 2024-11-20T17:27:12,791 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting f9080c5f8d994833b75b137c0d99be17, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732123628349 2024-11-20T17:27:12,791 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] compactions.Compactor(224): Compacting c71480e181c54534aea82dce3038fdd5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732123630477 2024-11-20T17:27:12,798 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 461e1341891f7389f7e7df762c9abfba#C#compaction#491 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T17:27:12,799 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/c5e40e6aadf24816befbdc6dc2e7e12d is 50, key is test_row_0/C:col10/1732123630477/Put/seqid=0 2024-11-20T17:27:12,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742411_1587 (size=12731) 2024-11-20T17:27:12,851 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:12,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T17:27:12,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:12,851 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T17:27:12,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:12,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:12,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:12,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:12,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:12,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:12,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bcc3a43d2afd4e359c0125675f4d5594_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123631123/Put/seqid=0 2024-11-20T17:27:12,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742412_1588 (size=12454) 2024-11-20T17:27:13,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:27:13,191 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/8472e4e3e1c24ee98e7872921d5b3415 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/8472e4e3e1c24ee98e7872921d5b3415 2024-11-20T17:27:13,195 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/A of 461e1341891f7389f7e7df762c9abfba into 8472e4e3e1c24ee98e7872921d5b3415(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
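The FlushRegionCallable / "Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families" entries above, and the FlushTableProcedure that completes later in the log, are driven by a client-side flush request. The sketch below shows what such a request looks like through the public Admin API, together with the client retry settings that would normally absorb the RegionTooBusyException responses that follow further down. Only the table name is taken from the log; the retry values are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // RegionTooBusyException is a retryable exception: the client retries up to this
        // many times with backoff starting at hbase.client.pause (values here are examples).
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Triggers the FlushTableProcedure / FlushRegionProcedure pair recorded in the log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}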
2024-11-20T17:27:13,195 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:13,195 INFO [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/A, priority=13, startTime=1732123632752; duration=0sec 2024-11-20T17:27:13,195 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:13,195 DEBUG [RS:0;d514dc944523:44015-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:A 2024-11-20T17:27:13,208 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/c5e40e6aadf24816befbdc6dc2e7e12d as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c5e40e6aadf24816befbdc6dc2e7e12d 2024-11-20T17:27:13,212 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 461e1341891f7389f7e7df762c9abfba/C of 461e1341891f7389f7e7df762c9abfba into c5e40e6aadf24816befbdc6dc2e7e12d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T17:27:13,212 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:13,212 INFO [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba., storeName=461e1341891f7389f7e7df762c9abfba/C, priority=13, startTime=1732123632752; duration=0sec 2024-11-20T17:27:13,212 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T17:27:13,212 DEBUG [RS:0;d514dc944523:44015-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 461e1341891f7389f7e7df762c9abfba:C 2024-11-20T17:27:13,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:13,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:13,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:13,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123693261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123693262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123693262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123693263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,266 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bcc3a43d2afd4e359c0125675f4d5594_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bcc3a43d2afd4e359c0125675f4d5594_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:13,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/2218e9d032df4cb98e48f70a452d192f, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:13,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/2218e9d032df4cb98e48f70a452d192f is 175, key is test_row_0/A:col10/1732123631123/Put/seqid=0 2024-11-20T17:27:13,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742413_1589 (size=31255) 2024-11-20T17:27:13,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123693365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123693365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123693365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123693366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123693568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123693568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123693568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123693569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,671 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=273, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/2218e9d032df4cb98e48f70a452d192f 2024-11-20T17:27:13,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/8643d08216dd4977b0ee18be78f7ea5c is 50, key is test_row_0/B:col10/1732123631123/Put/seqid=0 2024-11-20T17:27:13,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742414_1590 (size=12301) 2024-11-20T17:27:13,699 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T17:27:13,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123693871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123693871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123693871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:13,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:13,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123693873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:14,081 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/8643d08216dd4977b0ee18be78f7ea5c 2024-11-20T17:27:14,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/36b75b24f887405c9362f70a751ed30c is 50, key is test_row_0/C:col10/1732123631123/Put/seqid=0 2024-11-20T17:27:14,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742415_1591 (size=12301) 2024-11-20T17:27:14,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:14,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123694375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:14,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:14,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123694375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:14,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:14,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123694376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:14,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:14,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123694377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:14,492 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/36b75b24f887405c9362f70a751ed30c 2024-11-20T17:27:14,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/2218e9d032df4cb98e48f70a452d192f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/2218e9d032df4cb98e48f70a452d192f 2024-11-20T17:27:14,499 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/2218e9d032df4cb98e48f70a452d192f, entries=150, sequenceid=273, filesize=30.5 K 2024-11-20T17:27:14,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/8643d08216dd4977b0ee18be78f7ea5c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/8643d08216dd4977b0ee18be78f7ea5c 2024-11-20T17:27:14,503 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/8643d08216dd4977b0ee18be78f7ea5c, entries=150, sequenceid=273, filesize=12.0 K 2024-11-20T17:27:14,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/36b75b24f887405c9362f70a751ed30c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/36b75b24f887405c9362f70a751ed30c 2024-11-20T17:27:14,506 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/36b75b24f887405c9362f70a751ed30c, entries=150, sequenceid=273, filesize=12.0 K 2024-11-20T17:27:14,507 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 461e1341891f7389f7e7df762c9abfba in 1656ms, sequenceid=273, compaction requested=false 2024-11-20T17:27:14,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:14,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:14,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-20T17:27:14,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-20T17:27:14,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-20T17:27:14,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.4870 sec 2024-11-20T17:27:14,511 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 3.4910 sec 2024-11-20T17:27:15,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T17:27:15,125 INFO [Thread-2306 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-20T17:27:15,126 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:27:15,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-20T17:27:15,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T17:27:15,128 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:27:15,128 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:27:15,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:27:15,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T17:27:15,280 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:15,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T17:27:15,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:15,280 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T17:27:15,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:15,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:15,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:15,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:15,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:15,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:15,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b94ebf58030d4a759d3b6ad627a4ba0a_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123633261/Put/seqid=0 2024-11-20T17:27:15,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36709 is added to blk_1073742416_1592 (size=12454) 2024-11-20T17:27:15,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:15,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. as already flushing 2024-11-20T17:27:15,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:15,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123695422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:15,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:15,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123695424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:15,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:15,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123695425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:15,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:15,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123695425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:15,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T17:27:15,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:15,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123695526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:15,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123695528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:15,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:15,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123695528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123695528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:15,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:15,695 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b94ebf58030d4a759d3b6ad627a4ba0a_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b94ebf58030d4a759d3b6ad627a4ba0a_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:15,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/427c0ffd5576427c90f56fa77929918b, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:15,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/427c0ffd5576427c90f56fa77929918b is 175, key is test_row_0/A:col10/1732123633261/Put/seqid=0 2024-11-20T17:27:15,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742417_1593 (size=31255) 2024-11-20T17:27:15,700 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=26.8 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/427c0ffd5576427c90f56fa77929918b 2024-11-20T17:27:15,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/cb6d2999dfd14660992b4a6859433e32 is 50, key is test_row_0/B:col10/1732123633261/Put/seqid=0 2024-11-20T17:27:15,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742418_1594 (size=12301) 2024-11-20T17:27:15,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T17:27:15,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:15,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123695730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:15,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:15,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123695731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:15,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:15,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123695732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:15,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:15,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123695732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:16,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:16,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44730 deadline: 1732123696032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:16,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:16,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44690 deadline: 1732123696034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:16,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:16,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44666 deadline: 1732123696035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:16,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T17:27:16,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44015 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44708 deadline: 1732123696037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 2024-11-20T17:27:16,110 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/cb6d2999dfd14660992b4a6859433e32 2024-11-20T17:27:16,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/f8624ab65fb04f74815864066d1ed2ad is 50, key is test_row_0/C:col10/1732123633261/Put/seqid=0 2024-11-20T17:27:16,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742419_1595 (size=12301) 2024-11-20T17:27:16,120 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/f8624ab65fb04f74815864066d1ed2ad 2024-11-20T17:27:16,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/427c0ffd5576427c90f56fa77929918b as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/427c0ffd5576427c90f56fa77929918b 2024-11-20T17:27:16,133 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/427c0ffd5576427c90f56fa77929918b, entries=150, sequenceid=290, filesize=30.5 K 2024-11-20T17:27:16,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/cb6d2999dfd14660992b4a6859433e32 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/cb6d2999dfd14660992b4a6859433e32 2024-11-20T17:27:16,136 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/cb6d2999dfd14660992b4a6859433e32, entries=150, sequenceid=290, filesize=12.0 K 2024-11-20T17:27:16,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/f8624ab65fb04f74815864066d1ed2ad as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f8624ab65fb04f74815864066d1ed2ad 2024-11-20T17:27:16,141 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f8624ab65fb04f74815864066d1ed2ad, entries=150, sequenceid=290, filesize=12.0 K 2024-11-20T17:27:16,141 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 461e1341891f7389f7e7df762c9abfba in 861ms, sequenceid=290, compaction requested=true 2024-11-20T17:27:16,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:16,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
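[Editor's note] The entries above show the region server rejecting client Mutate RPCs with RegionTooBusyException ("Over memstore limit=512.0 K") while the flush for pid=162 drains the memstore; the same client connections (172.17.0.2:44666/44690/44708/44730) keep re-sending the writes until the flush finishes. A minimal sketch of a writer that backs off on that condition, using the public HBase client API, is below. The table name, row key, and family come from the log; the class name, retry bound, and sleep are illustrative assumptions, and depending on client version and retry settings the exception may surface wrapped in a retries-exhausted exception rather than directly.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriter {                       // illustrative name, not part of the test tool
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // The retry bound and backoff here are illustrative; the HBase client also
          // retries internally according to hbase.client.retries.number and pause settings.
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                          // may be rejected while the region is flushing
              break;
            } catch (RegionTooBusyException busy) {
              // Corresponds to the "Over memstore limit" rejections logged above:
              // back off and let the in-flight flush reduce the memstore before retrying.
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }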
2024-11-20T17:27:16,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-20T17:27:16,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-20T17:27:16,143 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-20T17:27:16,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0140 sec 2024-11-20T17:27:16,145 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 1.0180 sec 2024-11-20T17:27:16,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T17:27:16,231 INFO [Thread-2306 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-20T17:27:16,232 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T17:27:16,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-20T17:27:16,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T17:27:16,233 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T17:27:16,234 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T17:27:16,234 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T17:27:16,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T17:27:16,385 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:16,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44015 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T17:27:16,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
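[Editor's note] The FlushTableProcedure entries in this section (pid=159, 161, 163) are driven by explicit client flush requests, logged on the master as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees"; each one fans out into a FlushRegionProcedure per region, and the client polls "Checking to see if procedure is done" until it logs "Operation: FLUSH ... completed". A hedged sketch of the corresponding client call via the public Admin API follows; the class name is illustrative, and the connection setup is assumed boilerplate.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {                   // illustrative name
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Issues the request the master logs as "flush TestAcidGuarantees":
          // the master stores a FlushTableProcedure, dispatches a FlushRegionProcedure
          // to the hosting region server, and the call returns once the client sees
          // the procedure reported as completed.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }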
2024-11-20T17:27:16,386 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T17:27:16,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:16,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:16,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:16,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:16,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:16,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:16,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e96c091c2901469db2ad314f06b258bc_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123635423/Put/seqid=0 2024-11-20T17:27:16,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742420_1596 (size=12454) 2024-11-20T17:27:16,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:16,398 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e96c091c2901469db2ad314f06b258bc_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e96c091c2901469db2ad314f06b258bc_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:16,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/7a05e27b85444c53a83b93f5801e6aed, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:16,399 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/7a05e27b85444c53a83b93f5801e6aed is 175, key is test_row_0/A:col10/1732123635423/Put/seqid=0 2024-11-20T17:27:16,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742421_1597 (size=31255) 2024-11-20T17:27:16,513 DEBUG [Thread-2315 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x539997ae to 127.0.0.1:56028 2024-11-20T17:27:16,514 DEBUG [Thread-2315 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:16,514 DEBUG [Thread-2309 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63054209 to 127.0.0.1:56028 2024-11-20T17:27:16,514 DEBUG [Thread-2309 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:16,515 DEBUG [Thread-2311 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3fbb1399 to 127.0.0.1:56028 2024-11-20T17:27:16,515 DEBUG [Thread-2311 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:16,515 DEBUG [Thread-2313 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51fccca6 to 127.0.0.1:56028 2024-11-20T17:27:16,515 DEBUG [Thread-2313 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:16,515 DEBUG [Thread-2307 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d02ace0 to 127.0.0.1:56028 2024-11-20T17:27:16,515 DEBUG [Thread-2307 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:16,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T17:27:16,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44015 {}] regionserver.HRegion(8581): Flush requested on 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:16,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
as already flushing 2024-11-20T17:27:16,539 DEBUG [Thread-2304 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31f7e171 to 127.0.0.1:56028 2024-11-20T17:27:16,539 DEBUG [Thread-2304 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:16,540 DEBUG [Thread-2300 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79a7bd2b to 127.0.0.1:56028 2024-11-20T17:27:16,540 DEBUG [Thread-2298 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3cc71f2e to 127.0.0.1:56028 2024-11-20T17:27:16,540 DEBUG [Thread-2300 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:16,540 DEBUG [Thread-2298 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:16,543 DEBUG [Thread-2302 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d688bcb to 127.0.0.1:56028 2024-11-20T17:27:16,543 DEBUG [Thread-2302 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:16,803 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=311, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/7a05e27b85444c53a83b93f5801e6aed 2024-11-20T17:27:16,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/c29309cbd0f04f8c95e7b722b3e7f4a8 is 50, key is test_row_0/B:col10/1732123635423/Put/seqid=0 2024-11-20T17:27:16,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742422_1598 (size=12301) 2024-11-20T17:27:16,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T17:27:16,931 DEBUG [Thread-2296 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65f51785 to 127.0.0.1:56028 2024-11-20T17:27:16,931 DEBUG [Thread-2296 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:17,212 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/c29309cbd0f04f8c95e7b722b3e7f4a8 2024-11-20T17:27:17,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/62337236fe3144128bbf2e9f9215dc49 is 50, key is test_row_0/C:col10/1732123635423/Put/seqid=0 2024-11-20T17:27:17,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742423_1599 (size=12301) 2024-11-20T17:27:17,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=163 2024-11-20T17:27:17,621 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/62337236fe3144128bbf2e9f9215dc49 2024-11-20T17:27:17,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/7a05e27b85444c53a83b93f5801e6aed as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/7a05e27b85444c53a83b93f5801e6aed 2024-11-20T17:27:17,627 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/7a05e27b85444c53a83b93f5801e6aed, entries=150, sequenceid=311, filesize=30.5 K 2024-11-20T17:27:17,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/c29309cbd0f04f8c95e7b722b3e7f4a8 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/c29309cbd0f04f8c95e7b722b3e7f4a8 2024-11-20T17:27:17,630 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/c29309cbd0f04f8c95e7b722b3e7f4a8, entries=150, sequenceid=311, filesize=12.0 K 2024-11-20T17:27:17,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/62337236fe3144128bbf2e9f9215dc49 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/62337236fe3144128bbf2e9f9215dc49 2024-11-20T17:27:17,632 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/62337236fe3144128bbf2e9f9215dc49, entries=150, sequenceid=311, filesize=12.0 K 2024-11-20T17:27:17,633 INFO [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=33.54 KB/34350 for 461e1341891f7389f7e7df762c9abfba in 
1247ms, sequenceid=311, compaction requested=true 2024-11-20T17:27:17,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:17,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:17,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/d514dc944523:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-20T17:27:17,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-20T17:27:17,635 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-20T17:27:17,635 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4000 sec 2024-11-20T17:27:17,636 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.4030 sec 2024-11-20T17:27:18,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T17:27:18,337 INFO [Thread-2306 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 9 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7028 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6973 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6835 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7001 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7005 2024-11-20T17:27:18,337 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T17:27:18,337 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:27:18,337 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2089b1f4 to 127.0.0.1:56028 2024-11-20T17:27:18,337 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:18,338 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T17:27:18,338 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T17:27:18,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T17:27:18,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T17:27:18,340 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123638340"}]},"ts":"1732123638340"} 2024-11-20T17:27:18,341 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T17:27:18,344 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T17:27:18,344 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T17:27:18,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=167, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=461e1341891f7389f7e7df762c9abfba, UNASSIGN}] 2024-11-20T17:27:18,345 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=167, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=461e1341891f7389f7e7df762c9abfba, UNASSIGN 2024-11-20T17:27:18,346 INFO [PEWorker-4 {}] 
assignment.RegionStateStore(202): pid=167 updating hbase:meta row=461e1341891f7389f7e7df762c9abfba, regionState=CLOSING, regionLocation=d514dc944523,44015,1732123455293 2024-11-20T17:27:18,346 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T17:27:18,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; CloseRegionProcedure 461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293}] 2024-11-20T17:27:18,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T17:27:18,498 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to d514dc944523,44015,1732123455293 2024-11-20T17:27:18,498 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] handler.UnassignRegionHandler(124): Close 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:18,498 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T17:27:18,498 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1681): Closing 461e1341891f7389f7e7df762c9abfba, disabling compactions & flushes 2024-11-20T17:27:18,498 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:18,498 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 2024-11-20T17:27:18,498 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. after waiting 0 ms 2024-11-20T17:27:18,498 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:18,498 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(2837): Flushing 461e1341891f7389f7e7df762c9abfba 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T17:27:18,499 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=A 2024-11-20T17:27:18,499 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:18,499 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=B 2024-11-20T17:27:18,499 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:18,499 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 461e1341891f7389f7e7df762c9abfba, store=C 2024-11-20T17:27:18,499 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T17:27:18,503 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d85b14dba3ac42cb8649051fbf6d7aa1_461e1341891f7389f7e7df762c9abfba is 50, key is test_row_0/A:col10/1732123636542/Put/seqid=0 2024-11-20T17:27:18,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742424_1600 (size=12454) 2024-11-20T17:27:18,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T17:27:18,907 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T17:27:18,910 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d85b14dba3ac42cb8649051fbf6d7aa1_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d85b14dba3ac42cb8649051fbf6d7aa1_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:18,910 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/aa00ad47696d4b849e54c2a3c9f8c188, store: [table=TestAcidGuarantees family=A region=461e1341891f7389f7e7df762c9abfba] 2024-11-20T17:27:18,911 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/aa00ad47696d4b849e54c2a3c9f8c188 is 175, key is test_row_0/A:col10/1732123636542/Put/seqid=0 2024-11-20T17:27:18,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742425_1601 (size=31255) 2024-11-20T17:27:18,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T17:27:19,314 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=319, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/aa00ad47696d4b849e54c2a3c9f8c188 2024-11-20T17:27:19,319 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/7dbf4326410a4db88a9fb5a0d6128a5f is 50, key is test_row_0/B:col10/1732123636542/Put/seqid=0 2024-11-20T17:27:19,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742426_1602 (size=12301) 2024-11-20T17:27:19,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T17:27:19,723 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/7dbf4326410a4db88a9fb5a0d6128a5f 2024-11-20T17:27:19,728 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/31da7f5b6bfd4b49b0bf7bbe3279f4f2 is 50, key is test_row_0/C:col10/1732123636542/Put/seqid=0 2024-11-20T17:27:19,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742427_1603 (size=12301) 2024-11-20T17:27:20,132 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=319 (bloomFilter=true), 
to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/31da7f5b6bfd4b49b0bf7bbe3279f4f2 2024-11-20T17:27:20,135 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/A/aa00ad47696d4b849e54c2a3c9f8c188 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/aa00ad47696d4b849e54c2a3c9f8c188 2024-11-20T17:27:20,137 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/aa00ad47696d4b849e54c2a3c9f8c188, entries=150, sequenceid=319, filesize=30.5 K 2024-11-20T17:27:20,138 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/B/7dbf4326410a4db88a9fb5a0d6128a5f as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/7dbf4326410a4db88a9fb5a0d6128a5f 2024-11-20T17:27:20,140 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/7dbf4326410a4db88a9fb5a0d6128a5f, entries=150, sequenceid=319, filesize=12.0 K 2024-11-20T17:27:20,141 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/.tmp/C/31da7f5b6bfd4b49b0bf7bbe3279f4f2 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/31da7f5b6bfd4b49b0bf7bbe3279f4f2 2024-11-20T17:27:20,143 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/31da7f5b6bfd4b49b0bf7bbe3279f4f2, entries=150, sequenceid=319, filesize=12.0 K 2024-11-20T17:27:20,143 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 461e1341891f7389f7e7df762c9abfba in 1645ms, sequenceid=319, compaction requested=true 2024-11-20T17:27:20,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/ef1a15c39a9f4b6786e096a6020c36bd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/abd3206087d04974bcdc8d4c58e3df19, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/18a49cd84ee742beb0b8e84e698bf32a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/9b9760076dbe4e53a9649d0f5e0effb7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c88e13696eb846f7a12faf3adcb60d88, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/a36098c8697c44c085182d37cf172559, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/41a8e78a73bd4acc8c94739478e9f093, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/647ba7d3d16f448d97a0e53696aa5b45, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/6cebcbd842c44ed3b0213ebbb6293b26, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/11321aae73024503be8c40774403b5b4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/5a3c9fb6d60c4cf5ac159c1213d75169, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/8b844c58ef794ac2bfdea4556e5ef5d4, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/15f55ad203ed432e82686d9884cbe73b, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c5a6e7fa2206400f91dba53f20aa5c75, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/08890484f78740a29941a43219a164ff, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/4ec4072c860d4da99f96a9d227681f96, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/655aed42ba9743fbb7d8f7f26f014f6b] to archive 2024-11-20T17:27:20,145 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:27:20,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/ef1a15c39a9f4b6786e096a6020c36bd to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/ef1a15c39a9f4b6786e096a6020c36bd 2024-11-20T17:27:20,147 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/abd3206087d04974bcdc8d4c58e3df19 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/abd3206087d04974bcdc8d4c58e3df19 2024-11-20T17:27:20,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/18a49cd84ee742beb0b8e84e698bf32a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/18a49cd84ee742beb0b8e84e698bf32a 2024-11-20T17:27:20,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/9b9760076dbe4e53a9649d0f5e0effb7 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/9b9760076dbe4e53a9649d0f5e0effb7 2024-11-20T17:27:20,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c88e13696eb846f7a12faf3adcb60d88 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c88e13696eb846f7a12faf3adcb60d88 2024-11-20T17:27:20,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/a36098c8697c44c085182d37cf172559 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/a36098c8697c44c085182d37cf172559 2024-11-20T17:27:20,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/41a8e78a73bd4acc8c94739478e9f093 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/41a8e78a73bd4acc8c94739478e9f093 2024-11-20T17:27:20,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/647ba7d3d16f448d97a0e53696aa5b45 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/647ba7d3d16f448d97a0e53696aa5b45 2024-11-20T17:27:20,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/6cebcbd842c44ed3b0213ebbb6293b26 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/6cebcbd842c44ed3b0213ebbb6293b26 2024-11-20T17:27:20,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/11321aae73024503be8c40774403b5b4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/11321aae73024503be8c40774403b5b4 2024-11-20T17:27:20,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/5a3c9fb6d60c4cf5ac159c1213d75169 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/5a3c9fb6d60c4cf5ac159c1213d75169 2024-11-20T17:27:20,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/8b844c58ef794ac2bfdea4556e5ef5d4 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/8b844c58ef794ac2bfdea4556e5ef5d4 2024-11-20T17:27:20,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/15f55ad203ed432e82686d9884cbe73b to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/15f55ad203ed432e82686d9884cbe73b 2024-11-20T17:27:20,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c5a6e7fa2206400f91dba53f20aa5c75 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/c5a6e7fa2206400f91dba53f20aa5c75 2024-11-20T17:27:20,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/08890484f78740a29941a43219a164ff to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/08890484f78740a29941a43219a164ff 2024-11-20T17:27:20,158 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/4ec4072c860d4da99f96a9d227681f96 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/4ec4072c860d4da99f96a9d227681f96 2024-11-20T17:27:20,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/655aed42ba9743fbb7d8f7f26f014f6b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/655aed42ba9743fbb7d8f7f26f014f6b 2024-11-20T17:27:20,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/d2f78ab39ee242498792689a3902facd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/bf83bfaa1cc14d56841b69d1d71a07cf, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/3b3dab61c09846499a2cb865140c5bb7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/51686b493c1e4d719e816fa5bdb5f8fe, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/9fde4b93322943e6bfe0f022d11c3320, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/2e60e5b6b6594d578c093e716ec5ea01, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/648108316ecd4c859b81ca8d187d2100, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/d007dc44d83d4ce59d222d0b1345f57d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/2139bbaf83bf44c5bc3098c2372b0ddf, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/a4ad9e5cb44441d9b58216cd98914f07, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/36cde178663440f28b2d8bdc4f3bd241, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/0284923f57c140d5bb17dfae965d9b31, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/4c3a2e8b5fbb48e79314b6e838a9b855, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/213aeaf23ecd46adbd8f7104f47d6262, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/e23948382fb043e79157be12f48d39e1, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/b3ea5a58cc144676a0a53df54d81c8c7, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/5acb683a66d3415cb43d1a268c1d9aaa] to archive 2024-11-20T17:27:20,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:27:20,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/d2f78ab39ee242498792689a3902facd to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/d2f78ab39ee242498792689a3902facd 2024-11-20T17:27:20,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/bf83bfaa1cc14d56841b69d1d71a07cf to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/bf83bfaa1cc14d56841b69d1d71a07cf 2024-11-20T17:27:20,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/3b3dab61c09846499a2cb865140c5bb7 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/3b3dab61c09846499a2cb865140c5bb7 2024-11-20T17:27:20,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/51686b493c1e4d719e816fa5bdb5f8fe to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/51686b493c1e4d719e816fa5bdb5f8fe 2024-11-20T17:27:20,165 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/9fde4b93322943e6bfe0f022d11c3320 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/9fde4b93322943e6bfe0f022d11c3320 2024-11-20T17:27:20,165 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/2e60e5b6b6594d578c093e716ec5ea01 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/2e60e5b6b6594d578c093e716ec5ea01 2024-11-20T17:27:20,166 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/648108316ecd4c859b81ca8d187d2100 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/648108316ecd4c859b81ca8d187d2100 2024-11-20T17:27:20,167 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/d007dc44d83d4ce59d222d0b1345f57d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/d007dc44d83d4ce59d222d0b1345f57d 2024-11-20T17:27:20,168 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/2139bbaf83bf44c5bc3098c2372b0ddf to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/2139bbaf83bf44c5bc3098c2372b0ddf 2024-11-20T17:27:20,168 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/a4ad9e5cb44441d9b58216cd98914f07 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/a4ad9e5cb44441d9b58216cd98914f07 2024-11-20T17:27:20,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/36cde178663440f28b2d8bdc4f3bd241 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/36cde178663440f28b2d8bdc4f3bd241 2024-11-20T17:27:20,170 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/0284923f57c140d5bb17dfae965d9b31 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/0284923f57c140d5bb17dfae965d9b31 2024-11-20T17:27:20,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/4c3a2e8b5fbb48e79314b6e838a9b855 to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/4c3a2e8b5fbb48e79314b6e838a9b855 2024-11-20T17:27:20,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/213aeaf23ecd46adbd8f7104f47d6262 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/213aeaf23ecd46adbd8f7104f47d6262 2024-11-20T17:27:20,172 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/e23948382fb043e79157be12f48d39e1 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/e23948382fb043e79157be12f48d39e1 2024-11-20T17:27:20,173 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/b3ea5a58cc144676a0a53df54d81c8c7 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/b3ea5a58cc144676a0a53df54d81c8c7 2024-11-20T17:27:20,174 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/5acb683a66d3415cb43d1a268c1d9aaa to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/5acb683a66d3415cb43d1a268c1d9aaa 2024-11-20T17:27:20,175 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/63c99488ff82433ea28740536796940d, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/5b34f199d6da4112968c7a0315908801, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/70a11f6869be4e6c9f1ce9152d0e721e, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/5aeb3b87f6ad43878eb1768ab2971684, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/fb28edc7e00d43b1a3e2991d2060e9b2, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/d196d1ea2f41405792036c05e926fea8, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c672cc6d69a7433f8f1029d64ce3ffb2, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/9254c607398948c585a5b395761faa67, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/074318c71fd54f49bbccb84a4f938b74, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/70f012b394df499c93a0c77eb4425d4a, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f3d3a86fe63c4cffa80131912514f3f0, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/ef3371d00d584728b7238c938670744f, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/db609151561242758190b1c93dd24dfd, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/9a6a950350eb4dbf947f51821f76416c, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/a8c8f6a5d5df4f099d31661970cbdabe, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f9080c5f8d994833b75b137c0d99be17, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c71480e181c54534aea82dce3038fdd5] to archive 2024-11-20T17:27:20,176 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T17:27:20,177 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/63c99488ff82433ea28740536796940d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/63c99488ff82433ea28740536796940d 2024-11-20T17:27:20,178 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/5b34f199d6da4112968c7a0315908801 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/5b34f199d6da4112968c7a0315908801 2024-11-20T17:27:20,179 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/70a11f6869be4e6c9f1ce9152d0e721e to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/70a11f6869be4e6c9f1ce9152d0e721e 2024-11-20T17:27:20,180 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/5aeb3b87f6ad43878eb1768ab2971684 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/5aeb3b87f6ad43878eb1768ab2971684 2024-11-20T17:27:20,180 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/fb28edc7e00d43b1a3e2991d2060e9b2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/fb28edc7e00d43b1a3e2991d2060e9b2 2024-11-20T17:27:20,181 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/d196d1ea2f41405792036c05e926fea8 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/d196d1ea2f41405792036c05e926fea8 2024-11-20T17:27:20,182 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c672cc6d69a7433f8f1029d64ce3ffb2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c672cc6d69a7433f8f1029d64ce3ffb2 2024-11-20T17:27:20,183 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/9254c607398948c585a5b395761faa67 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/9254c607398948c585a5b395761faa67 2024-11-20T17:27:20,183 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/074318c71fd54f49bbccb84a4f938b74 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/074318c71fd54f49bbccb84a4f938b74 2024-11-20T17:27:20,184 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/70f012b394df499c93a0c77eb4425d4a to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/70f012b394df499c93a0c77eb4425d4a 2024-11-20T17:27:20,185 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f3d3a86fe63c4cffa80131912514f3f0 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f3d3a86fe63c4cffa80131912514f3f0 2024-11-20T17:27:20,186 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/ef3371d00d584728b7238c938670744f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/ef3371d00d584728b7238c938670744f 2024-11-20T17:27:20,186 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/db609151561242758190b1c93dd24dfd to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/db609151561242758190b1c93dd24dfd 2024-11-20T17:27:20,187 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/9a6a950350eb4dbf947f51821f76416c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/9a6a950350eb4dbf947f51821f76416c 2024-11-20T17:27:20,188 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/a8c8f6a5d5df4f099d31661970cbdabe to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/a8c8f6a5d5df4f099d31661970cbdabe 2024-11-20T17:27:20,189 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f9080c5f8d994833b75b137c0d99be17 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f9080c5f8d994833b75b137c0d99be17 2024-11-20T17:27:20,189 DEBUG [StoreCloser-TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c71480e181c54534aea82dce3038fdd5 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c71480e181c54534aea82dce3038fdd5 2024-11-20T17:27:20,193 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/recovered.edits/322.seqid, newMaxSeqId=322, maxSeqId=4 2024-11-20T17:27:20,193 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba. 
2024-11-20T17:27:20,193 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] regionserver.HRegion(1635): Region close journal for 461e1341891f7389f7e7df762c9abfba: 2024-11-20T17:27:20,194 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION, pid=168}] handler.UnassignRegionHandler(170): Closed 461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,195 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=167 updating hbase:meta row=461e1341891f7389f7e7df762c9abfba, regionState=CLOSED 2024-11-20T17:27:20,196 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-20T17:27:20,196 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; CloseRegionProcedure 461e1341891f7389f7e7df762c9abfba, server=d514dc944523,44015,1732123455293 in 1.8490 sec 2024-11-20T17:27:20,197 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=167, resume processing ppid=166 2024-11-20T17:27:20,197 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, ppid=166, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=461e1341891f7389f7e7df762c9abfba, UNASSIGN in 1.8510 sec 2024-11-20T17:27:20,198 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-20T17:27:20,199 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8540 sec 2024-11-20T17:27:20,199 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732123640199"}]},"ts":"1732123640199"} 2024-11-20T17:27:20,200 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T17:27:20,202 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T17:27:20,203 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8650 sec 2024-11-20T17:27:20,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T17:27:20,444 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-20T17:27:20,444 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T17:27:20,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:27:20,446 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=169, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:27:20,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T17:27:20,446 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=169, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:27:20,447 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,449 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C, FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/recovered.edits] 2024-11-20T17:27:20,451 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/2218e9d032df4cb98e48f70a452d192f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/2218e9d032df4cb98e48f70a452d192f 2024-11-20T17:27:20,452 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/427c0ffd5576427c90f56fa77929918b to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/427c0ffd5576427c90f56fa77929918b 2024-11-20T17:27:20,453 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/7a05e27b85444c53a83b93f5801e6aed to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/7a05e27b85444c53a83b93f5801e6aed 2024-11-20T17:27:20,454 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/8472e4e3e1c24ee98e7872921d5b3415 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/8472e4e3e1c24ee98e7872921d5b3415 2024-11-20T17:27:20,455 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/aa00ad47696d4b849e54c2a3c9f8c188 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/A/aa00ad47696d4b849e54c2a3c9f8c188 
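The DISABLE and DELETE operations recorded above (procId 165 and 169) are driven by client-side Admin calls; the master-side DeleteTableProcedure then runs the DELETE_TABLE_CLEAR_FS_LAYOUT step, which is what produces the HFileArchiver "ARCHIVING"/"Archived from" entries in the surrounding lines. A minimal client-side sketch of the equivalent calls is given below. It is illustrative only: the class name DropTableExample and the surrounding wiring are assumptions, and it simply uses the standard HBase 2.x Admin API against a Configuration that is assumed to point at the running cluster.

    // Illustrative sketch (not this test's source): disable and delete a table the way
    // the client operations logged above do. Assumes `conf` targets the running cluster.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableExample {
      public static void dropTable(Configuration conf) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // disableTable blocks until the master's DisableTableProcedure completes,
          // which corresponds to the "Checking to see if procedure is done pid=165" polling.
          if (admin.tableExists(table) && admin.isTableEnabled(table)) {
            admin.disableTable(table);
          }
          // deleteTable submits the DeleteTableProcedure; its filesystem-cleanup step is
          // what drives the HFileArchiver archiving of the region's A/B/C store files.
          if (admin.tableExists(table)) {
            admin.deleteTable(table);
          }
        }
      }
    }

In the log, the blocking behaviour of these calls shows up as the repeated master.MasterRpcServices "Checking to see if procedure is done" entries and the client.HBaseAdmin$TableFuture "Operation: DISABLE/DELETE ... completed" lines once each procedure finishes.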
2024-11-20T17:27:20,456 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/7dbf4326410a4db88a9fb5a0d6128a5f to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/7dbf4326410a4db88a9fb5a0d6128a5f 2024-11-20T17:27:20,457 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/8643d08216dd4977b0ee18be78f7ea5c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/8643d08216dd4977b0ee18be78f7ea5c 2024-11-20T17:27:20,458 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/c29309cbd0f04f8c95e7b722b3e7f4a8 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/c29309cbd0f04f8c95e7b722b3e7f4a8 2024-11-20T17:27:20,459 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/cb6d2999dfd14660992b4a6859433e32 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/cb6d2999dfd14660992b4a6859433e32 2024-11-20T17:27:20,460 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/cb7db616540b40878432c2a0155ede75 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/B/cb7db616540b40878432c2a0155ede75 2024-11-20T17:27:20,461 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/31da7f5b6bfd4b49b0bf7bbe3279f4f2 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/31da7f5b6bfd4b49b0bf7bbe3279f4f2 2024-11-20T17:27:20,462 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/36b75b24f887405c9362f70a751ed30c to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/36b75b24f887405c9362f70a751ed30c 2024-11-20T17:27:20,463 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/62337236fe3144128bbf2e9f9215dc49 to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/62337236fe3144128bbf2e9f9215dc49 2024-11-20T17:27:20,464 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c5e40e6aadf24816befbdc6dc2e7e12d to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/c5e40e6aadf24816befbdc6dc2e7e12d 2024-11-20T17:27:20,464 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f8624ab65fb04f74815864066d1ed2ad to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/C/f8624ab65fb04f74815864066d1ed2ad 2024-11-20T17:27:20,466 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/recovered.edits/322.seqid to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba/recovered.edits/322.seqid 2024-11-20T17:27:20,467 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/default/TestAcidGuarantees/461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,467 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T17:27:20,467 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T17:27:20,468 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T17:27:20,470 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120197a59ec35284b0887d11715504593e5_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120197a59ec35284b0887d11715504593e5_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,471 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202ce2916de38d42e69e96ae98bbc8f4ca_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202ce2916de38d42e69e96ae98bbc8f4ca_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,471 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202ed1f4d6769f4c08a6b10cebf6cf9473_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202ed1f4d6769f4c08a6b10cebf6cf9473_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,472 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203117f867f36647f281039074a1f7297f_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203117f867f36647f281039074a1f7297f_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,473 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207f09860073f140b1abdeba24f7994e3e_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207f09860073f140b1abdeba24f7994e3e_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,474 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120831286b6d3f2492f8a815549e9ef1d2b_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120831286b6d3f2492f8a815549e9ef1d2b_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,475 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112086fc7db12d0e4425add4c2a70bd3aca9_461e1341891f7389f7e7df762c9abfba to 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112086fc7db12d0e4425add4c2a70bd3aca9_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,476 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112090403f5ce3844f96b4a03523a5de235c_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112090403f5ce3844f96b4a03523a5de235c_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,477 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aa46eb11841c4366ac0aeff01401f251_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120aa46eb11841c4366ac0aeff01401f251_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,477 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ac4adabe56c84fc8972bd7cd73d819a0_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ac4adabe56c84fc8972bd7cd73d819a0_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,478 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b3132b97ee2e4893945b103910e681aa_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b3132b97ee2e4893945b103910e681aa_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,479 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b94ebf58030d4a759d3b6ad627a4ba0a_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b94ebf58030d4a759d3b6ad627a4ba0a_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,480 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bcc3a43d2afd4e359c0125675f4d5594_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bcc3a43d2afd4e359c0125675f4d5594_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,481 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bea376097c9d47af9c095a7720bfe84a_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bea376097c9d47af9c095a7720bfe84a_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,481 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d004448d511a47ffb715924c3ae33414_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d004448d511a47ffb715924c3ae33414_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,482 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d85b14dba3ac42cb8649051fbf6d7aa1_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d85b14dba3ac42cb8649051fbf6d7aa1_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,483 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e96c091c2901469db2ad314f06b258bc_461e1341891f7389f7e7df762c9abfba to hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e96c091c2901469db2ad314f06b258bc_461e1341891f7389f7e7df762c9abfba 2024-11-20T17:27:20,483 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T17:27:20,485 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=169, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:27:20,487 WARN 
[PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T17:27:20,489 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T17:27:20,489 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=169, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:27:20,489 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T17:27:20,489 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732123640489"}]},"ts":"9223372036854775807"} 2024-11-20T17:27:20,491 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T17:27:20,491 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 461e1341891f7389f7e7df762c9abfba, NAME => 'TestAcidGuarantees,,1732123613430.461e1341891f7389f7e7df762c9abfba.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T17:27:20,491 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T17:27:20,491 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732123640491"}]},"ts":"9223372036854775807"} 2024-11-20T17:27:20,492 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T17:27:20,494 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=169, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T17:27:20,494 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 50 msec 2024-11-20T17:27:20,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35243 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T17:27:20,547 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-20T17:27:20,556 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=240 (was 239) - Thread LEAK? -, OpenFileDescriptor=459 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=328 (was 308) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=6081 (was 6112) 2024-11-20T17:27:20,556 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-20T17:27:20,556 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T17:27:20,556 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6169df5c to 127.0.0.1:56028 2024-11-20T17:27:20,556 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:20,556 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T17:27:20,556 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=67080707, stopped=false 2024-11-20T17:27:20,557 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=d514dc944523,35243,1732123454567 2024-11-20T17:27:20,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T17:27:20,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T17:27:20,559 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-20T17:27:20,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:27:20,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:27:20,559 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:20,559 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T17:27:20,559 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T17:27:20,559 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'd514dc944523,44015,1732123455293' ***** 2024-11-20T17:27:20,559 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-20T17:27:20,560 INFO [RS:0;d514dc944523:44015 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T17:27:20,560 INFO [RS:0;d514dc944523:44015 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T17:27:20,560 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-20T17:27:20,560 INFO [RS:0;d514dc944523:44015 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
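The "Shutting down minicluster" entry above begins the test teardown: the client connection is closed, the region server and master are asked to stop, and finally the HDFS DataNode/NameNode web contexts and the MiniZK cluster are stopped ("Minicluster is down" at the end of this log). In an HBase test this teardown is normally a single call on the testing utility; the sketch below is an assumed JUnit 4 wiring (the field name UTIL and the single-region-server setup are assumptions, not taken from this test's source).

    // Illustrative lifecycle sketch for an HBase minicluster test, assuming JUnit 4.
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycleExample {
      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUp() throws Exception {
        // One region server, matching the single RS:0;...:44015 instance in this log.
        UTIL.startMiniCluster(1);
      }

      @AfterClass
      public static void tearDown() throws Exception {
        // Stops the HBase cluster (region server, then master), then mini-DFS, then mini-ZK,
        // which matches the shutdown order seen in the remainder of this log.
        UTIL.shutdownMiniCluster();
      }
    }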
2024-11-20T17:27:20,560 INFO [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(3579): Received CLOSE for e60665fdde91447ed275607cc98db134 2024-11-20T17:27:20,560 INFO [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1224): stopping server d514dc944523,44015,1732123455293 2024-11-20T17:27:20,560 DEBUG [RS:0;d514dc944523:44015 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:20,561 INFO [RS:0;d514dc944523:44015 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T17:27:20,561 INFO [RS:0;d514dc944523:44015 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T17:27:20,561 INFO [RS:0;d514dc944523:44015 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T17:27:20,561 INFO [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-20T17:27:20,561 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing e60665fdde91447ed275607cc98db134, disabling compactions & flushes 2024-11-20T17:27:20,561 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 2024-11-20T17:27:20,561 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 2024-11-20T17:27:20,561 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. after waiting 0 ms 2024-11-20T17:27:20,561 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 
2024-11-20T17:27:20,561 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing e60665fdde91447ed275607cc98db134 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-20T17:27:20,561 INFO [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-20T17:27:20,561 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1603): Online Regions={e60665fdde91447ed275607cc98db134=hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134., 1588230740=hbase:meta,,1.1588230740} 2024-11-20T17:27:20,561 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T17:27:20,561 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T17:27:20,561 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T17:27:20,561 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T17:27:20,561 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T17:27:20,561 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-20T17:27:20,562 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, e60665fdde91447ed275607cc98db134 2024-11-20T17:27:20,576 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/namespace/e60665fdde91447ed275607cc98db134/.tmp/info/b896af22c2b64ab09332b789f6de551c is 45, key is default/info:d/1732123460190/Put/seqid=0 2024-11-20T17:27:20,577 INFO [regionserver/d514dc944523:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T17:27:20,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742428_1604 (size=5037) 2024-11-20T17:27:20,582 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/.tmp/info/0ba9c41fb78b4083982e33575e6a97e7 is 143, key is hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134./info:regioninfo/1732123460078/Put/seqid=0 2024-11-20T17:27:20,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742429_1605 (size=7725) 2024-11-20T17:27:20,762 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, e60665fdde91447ed275607cc98db134 2024-11-20T17:27:20,962 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, e60665fdde91447ed275607cc98db134 2024-11-20T17:27:20,980 INFO 
[RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/namespace/e60665fdde91447ed275607cc98db134/.tmp/info/b896af22c2b64ab09332b789f6de551c 2024-11-20T17:27:20,983 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/namespace/e60665fdde91447ed275607cc98db134/.tmp/info/b896af22c2b64ab09332b789f6de551c as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/namespace/e60665fdde91447ed275607cc98db134/info/b896af22c2b64ab09332b789f6de551c 2024-11-20T17:27:20,985 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/.tmp/info/0ba9c41fb78b4083982e33575e6a97e7 2024-11-20T17:27:20,986 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/namespace/e60665fdde91447ed275607cc98db134/info/b896af22c2b64ab09332b789f6de551c, entries=2, sequenceid=6, filesize=4.9 K 2024-11-20T17:27:20,986 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for e60665fdde91447ed275607cc98db134 in 425ms, sequenceid=6, compaction requested=false 2024-11-20T17:27:20,989 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/namespace/e60665fdde91447ed275607cc98db134/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T17:27:20,990 INFO [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 2024-11-20T17:27:20,990 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for e60665fdde91447ed275607cc98db134: 2024-11-20T17:27:20,990 DEBUG [RS_CLOSE_REGION-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732123458839.e60665fdde91447ed275607cc98db134. 
2024-11-20T17:27:21,002 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/.tmp/rep_barrier/8d6be5faac784681a1995f429c634e68 is 102, key is TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc./rep_barrier:/1732123488063/DeleteFamily/seqid=0 2024-11-20T17:27:21,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742430_1606 (size=6025) 2024-11-20T17:27:21,162 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T17:27:21,363 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T17:27:21,406 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/.tmp/rep_barrier/8d6be5faac784681a1995f429c634e68 2024-11-20T17:27:21,424 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/.tmp/table/2c57626f61da4b3e8290c062b9b2d4eb is 96, key is TestAcidGuarantees,,1732123460424.895da877845d8163116b8248e2bc3ffc./table:/1732123488063/DeleteFamily/seqid=0 2024-11-20T17:27:21,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742431_1607 (size=5942) 2024-11-20T17:27:21,533 INFO [regionserver/d514dc944523:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T17:27:21,533 INFO [regionserver/d514dc944523:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T17:27:21,563 INFO [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-20T17:27:21,563 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-20T17:27:21,563 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T17:27:21,763 DEBUG [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T17:27:21,827 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/.tmp/table/2c57626f61da4b3e8290c062b9b2d4eb 2024-11-20T17:27:21,831 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/.tmp/info/0ba9c41fb78b4083982e33575e6a97e7 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/info/0ba9c41fb78b4083982e33575e6a97e7 2024-11-20T17:27:21,833 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/info/0ba9c41fb78b4083982e33575e6a97e7, entries=22, sequenceid=93, filesize=7.5 K 2024-11-20T17:27:21,834 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/.tmp/rep_barrier/8d6be5faac784681a1995f429c634e68 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/rep_barrier/8d6be5faac784681a1995f429c634e68 2024-11-20T17:27:21,836 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/rep_barrier/8d6be5faac784681a1995f429c634e68, entries=6, sequenceid=93, filesize=5.9 K 2024-11-20T17:27:21,836 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/.tmp/table/2c57626f61da4b3e8290c062b9b2d4eb as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/table/2c57626f61da4b3e8290c062b9b2d4eb 2024-11-20T17:27:21,839 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/table/2c57626f61da4b3e8290c062b9b2d4eb, entries=9, sequenceid=93, filesize=5.8 K 2024-11-20T17:27:21,839 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1278ms, sequenceid=93, compaction requested=false 2024-11-20T17:27:21,843 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-20T17:27:21,843 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T17:27:21,843 INFO [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T17:27:21,843 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T17:27:21,843 DEBUG [RS_CLOSE_META-regionserver/d514dc944523:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T17:27:21,963 INFO [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1250): stopping server d514dc944523,44015,1732123455293; all regions closed. 
2024-11-20T17:27:21,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741834_1010 (size=26050) 2024-11-20T17:27:21,969 DEBUG [RS:0;d514dc944523:44015 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/oldWALs 2024-11-20T17:27:21,969 INFO [RS:0;d514dc944523:44015 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL d514dc944523%2C44015%2C1732123455293.meta:.meta(num 1732123458592) 2024-11-20T17:27:21,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741832_1008 (size=13803906) 2024-11-20T17:27:21,973 DEBUG [RS:0;d514dc944523:44015 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/oldWALs 2024-11-20T17:27:21,973 INFO [RS:0;d514dc944523:44015 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL d514dc944523%2C44015%2C1732123455293:(num 1732123457643) 2024-11-20T17:27:21,973 DEBUG [RS:0;d514dc944523:44015 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:21,973 INFO [RS:0;d514dc944523:44015 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T17:27:21,973 INFO [RS:0;d514dc944523:44015 {}] hbase.ChoreService(370): Chore service for: regionserver/d514dc944523:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-11-20T17:27:21,973 INFO [regionserver/d514dc944523:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-20T17:27:21,974 INFO [RS:0;d514dc944523:44015 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44015 2024-11-20T17:27:21,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T17:27:21,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d514dc944523,44015,1732123455293 2024-11-20T17:27:21,980 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d514dc944523,44015,1732123455293] 2024-11-20T17:27:21,980 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing d514dc944523,44015,1732123455293; numProcessing=1 2024-11-20T17:27:21,981 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/d514dc944523,44015,1732123455293 already deleted, retry=false 2024-11-20T17:27:21,981 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; d514dc944523,44015,1732123455293 expired; onlineServers=0 2024-11-20T17:27:21,981 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'd514dc944523,35243,1732123454567' ***** 2024-11-20T17:27:21,981 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T17:27:21,982 DEBUG [M:0;d514dc944523:35243 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@238f856c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d514dc944523/172.17.0.2:0 2024-11-20T17:27:21,982 INFO [M:0;d514dc944523:35243 {}] regionserver.HRegionServer(1224): stopping server d514dc944523,35243,1732123454567 2024-11-20T17:27:21,982 INFO [M:0;d514dc944523:35243 {}] regionserver.HRegionServer(1250): stopping server d514dc944523,35243,1732123454567; all regions closed. 2024-11-20T17:27:21,982 DEBUG [M:0;d514dc944523:35243 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T17:27:21,982 DEBUG [M:0;d514dc944523:35243 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T17:27:21,982 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-20T17:27:21,982 DEBUG [M:0;d514dc944523:35243 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T17:27:21,982 DEBUG [master/d514dc944523:0:becomeActiveMaster-HFileCleaner.large.0-1732123457358 {}] cleaner.HFileCleaner(306): Exit Thread[master/d514dc944523:0:becomeActiveMaster-HFileCleaner.large.0-1732123457358,5,FailOnTimeoutGroup] 2024-11-20T17:27:21,982 DEBUG [master/d514dc944523:0:becomeActiveMaster-HFileCleaner.small.0-1732123457358 {}] cleaner.HFileCleaner(306): Exit Thread[master/d514dc944523:0:becomeActiveMaster-HFileCleaner.small.0-1732123457358,5,FailOnTimeoutGroup] 2024-11-20T17:27:21,982 INFO [M:0;d514dc944523:35243 {}] hbase.ChoreService(370): Chore service for: master/d514dc944523:0 had [] on shutdown 2024-11-20T17:27:21,983 DEBUG [M:0;d514dc944523:35243 {}] master.HMaster(1733): Stopping service threads 2024-11-20T17:27:21,983 INFO [M:0;d514dc944523:35243 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T17:27:21,983 ERROR [M:0;d514dc944523:35243 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-5,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-20T17:27:21,983 INFO [M:0;d514dc944523:35243 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T17:27:21,984 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-20T17:27:21,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T17:27:21,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T17:27:21,984 DEBUG [M:0;d514dc944523:35243 {}] zookeeper.ZKUtil(347): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T17:27:21,984 WARN [M:0;d514dc944523:35243 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T17:27:21,984 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T17:27:21,984 INFO [M:0;d514dc944523:35243 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-20T17:27:21,984 INFO [M:0;d514dc944523:35243 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T17:27:21,984 DEBUG [M:0;d514dc944523:35243 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T17:27:21,984 INFO [M:0;d514dc944523:35243 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T17:27:21,984 DEBUG [M:0;d514dc944523:35243 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T17:27:21,984 DEBUG [M:0;d514dc944523:35243 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T17:27:21,984 DEBUG [M:0;d514dc944523:35243 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-20T17:27:21,985 INFO [M:0;d514dc944523:35243 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=769.97 KB heapSize=947.30 KB
2024-11-20T17:27:22,000 DEBUG [M:0;d514dc944523:35243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d2b43b3389e048d68d3a745ae135e619 is 82, key is hbase:meta,,1/info:regioninfo/1732123458729/Put/seqid=0
2024-11-20T17:27:22,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742432_1608 (size=5672)
2024-11-20T17:27:22,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T17:27:22,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44015-0x10015f622270001, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T17:27:22,080 INFO [RS:0;d514dc944523:44015 {}] regionserver.HRegionServer(1307): Exiting; stopping=d514dc944523,44015,1732123455293; zookeeper connection closed.
2024-11-20T17:27:22,080 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@695ad953 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@695ad953
2024-11-20T17:27:22,081 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-20T17:27:22,404 INFO [M:0;d514dc944523:35243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2195 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d2b43b3389e048d68d3a745ae135e619
2024-11-20T17:27:22,425 DEBUG [M:0;d514dc944523:35243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/979a37792da44247be2ce7a7b739d3af is 2284, key is \x00\x00\x00\x00\x00\x00\x00\x90/proc:d/1732123616444/Put/seqid=0
2024-11-20T17:27:22,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742433_1609 (size=43301)
2024-11-20T17:27:22,429 INFO [M:0;d514dc944523:35243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=769.41 KB at sequenceid=2195 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/979a37792da44247be2ce7a7b739d3af
2024-11-20T17:27:22,432 INFO [M:0;d514dc944523:35243 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 979a37792da44247be2ce7a7b739d3af
2024-11-20T17:27:22,446 DEBUG [M:0;d514dc944523:35243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3ba72183eb4c447fb5c3a05efefa8856 is 69, key is d514dc944523,44015,1732123455293/rs:state/1732123457417/Put/seqid=0
2024-11-20T17:27:22,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073742434_1610 (size=5156)
2024-11-20T17:27:22,850 INFO [M:0;d514dc944523:35243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2195 (bloomFilter=true), to=hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3ba72183eb4c447fb5c3a05efefa8856
2024-11-20T17:27:22,853 DEBUG [M:0;d514dc944523:35243 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d2b43b3389e048d68d3a745ae135e619 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d2b43b3389e048d68d3a745ae135e619
2024-11-20T17:27:22,855 INFO [M:0;d514dc944523:35243 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d2b43b3389e048d68d3a745ae135e619, entries=8, sequenceid=2195, filesize=5.5 K
2024-11-20T17:27:22,856 DEBUG [M:0;d514dc944523:35243 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/979a37792da44247be2ce7a7b739d3af as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/979a37792da44247be2ce7a7b739d3af
2024-11-20T17:27:22,858 INFO [M:0;d514dc944523:35243 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 979a37792da44247be2ce7a7b739d3af
2024-11-20T17:27:22,858 INFO [M:0;d514dc944523:35243 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/979a37792da44247be2ce7a7b739d3af, entries=169, sequenceid=2195, filesize=42.3 K
2024-11-20T17:27:22,858 DEBUG [M:0;d514dc944523:35243 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3ba72183eb4c447fb5c3a05efefa8856 as hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3ba72183eb4c447fb5c3a05efefa8856
2024-11-20T17:27:22,861 INFO [M:0;d514dc944523:35243 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:40219/user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3ba72183eb4c447fb5c3a05efefa8856, entries=1, sequenceid=2195, filesize=5.0 K
2024-11-20T17:27:22,861 INFO [M:0;d514dc944523:35243 {}] regionserver.HRegion(3040): Finished flush of dataSize ~769.97 KB/788448, heapSize ~947.01 KB/969736, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 877ms, sequenceid=2195, compaction requested=false
2024-11-20T17:27:22,863 INFO [M:0;d514dc944523:35243 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-20T17:27:22,863 DEBUG [M:0;d514dc944523:35243 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-20T17:27:22,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36709 is added to blk_1073741830_1006 (size=932322)
2024-11-20T17:27:22,865 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/fa0d92c2-12f4-fa29-db2d-05d1286966ff/MasterData/WALs/d514dc944523,35243,1732123454567/d514dc944523%2C35243%2C1732123454567.1732123456861 not finished, retry = 0
2024-11-20T17:27:22,966 INFO [M:0;d514dc944523:35243 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-11-20T17:27:22,966 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-20T17:27:22,966 INFO [M:0;d514dc944523:35243 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35243
2024-11-20T17:27:22,967 DEBUG [M:0;d514dc944523:35243 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/d514dc944523,35243,1732123454567 already deleted, retry=false
2024-11-20T17:27:23,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T17:27:23,069 INFO [M:0;d514dc944523:35243 {}] regionserver.HRegionServer(1307): Exiting; stopping=d514dc944523,35243,1732123454567; zookeeper connection closed.
2024-11-20T17:27:23,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35243-0x10015f622270000, quorum=127.0.0.1:56028, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-20T17:27:23,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bd2e890{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-20T17:27:23,076 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d3fa6ef{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T17:27:23,076 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T17:27:23,076 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63d4d645{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T17:27:23,076 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57582772{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/hadoop.log.dir/,STOPPED}
2024-11-20T17:27:23,079 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-20T17:27:23,079 WARN [BP-1213880657-172.17.0.2-1732123451817 heartbeating to localhost/127.0.0.1:40219 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-20T17:27:23,079 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-20T17:27:23,079 WARN [BP-1213880657-172.17.0.2-1732123451817 heartbeating to localhost/127.0.0.1:40219 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1213880657-172.17.0.2-1732123451817 (Datanode Uuid f4cd3955-a200-4582-9dfd-eca8d5ad5f92) service to localhost/127.0.0.1:40219
2024-11-20T17:27:23,081 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/cluster_57d363a1-1306-466a-894b-887e1f24f2a3/dfs/data/data1/current/BP-1213880657-172.17.0.2-1732123451817 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T17:27:23,081 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/cluster_57d363a1-1306-466a-894b-887e1f24f2a3/dfs/data/data2/current/BP-1213880657-172.17.0.2-1732123451817 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-20T17:27:23,082 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-20T17:27:23,088 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f0d4558{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-20T17:27:23,089 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a299586{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-20T17:27:23,089 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-20T17:27:23,089 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@588be694{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-20T17:27:23,089 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73882ca4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/abe2285d-e515-0265-153c-9542f79ac978/hadoop.log.dir/,STOPPED}
2024-11-20T17:27:23,105 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-20T17:27:23,224 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down